diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java index 30d7ff6..226d4f6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java @@ -51,5 +51,5 @@ public interface CoprocessorEnvironment { * @return an interface for accessing the given table * @throws IOException */ - HTableInterface getTable(byte[] tableName) throws IOException; + HTableInterface getTable(TableName tableName) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index d6fab55..9535e72 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -189,16 +189,15 @@ public class HRegionInfo implements Comparable { private byte [] encodedNameAsBytes = null; // Current TableName - private byte[] tableName = null; - private String tableNameAsString = null; + private TableName tableName = null; /** HRegionInfo for root region */ public static final HRegionInfo ROOT_REGIONINFO = - new HRegionInfo(0L, Bytes.toBytes("-ROOT-")); + new HRegionInfo(0L, TableName.ROOT_TABLE_NAME); /** HRegionInfo for first meta region */ public static final HRegionInfo FIRST_META_REGIONINFO = - new HRegionInfo(1L, Bytes.toBytes(".META.")); + new HRegionInfo(1L, TableName.META_TABLE_NAME); private void setHashCode() { int result = Arrays.hashCode(this.regionName); @@ -206,7 +205,7 @@ public class HRegionInfo implements Comparable { result ^= Arrays.hashCode(this.startKey); result ^= Arrays.hashCode(this.endKey); result ^= Boolean.valueOf(this.offLine).hashCode(); - result ^= Arrays.hashCode(this.tableName); + result ^= Arrays.hashCode(this.tableName.getName()); this.hashCode = result; } @@ -215,10 +214,10 @@ public class HRegionInfo implements Comparable { * Private constructor used constructing HRegionInfo for the * first meta regions */ - private HRegionInfo(long regionId, byte[] tableName) { + private HRegionInfo(long regionId, TableName tableName) { super(); this.regionId = regionId; - this.tableName = tableName.clone(); + this.tableName = tableName; // Note: First Meta regions names are still in old format this.regionName = createRegionName(tableName, null, regionId, false); @@ -234,7 +233,7 @@ public class HRegionInfo implements Comparable { super(); } - public HRegionInfo(final byte[] tableName) { + public HRegionInfo(final TableName tableName) { this(tableName, null, null); } @@ -246,7 +245,7 @@ public class HRegionInfo implements Comparable { * @param endKey end of key range * @throws IllegalArgumentException */ - public HRegionInfo(final byte[] tableName, final byte[] startKey, final byte[] endKey) + public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey) throws IllegalArgumentException { this(tableName, startKey, endKey, false); } @@ -262,7 +261,7 @@ public class HRegionInfo implements Comparable { * regions that may or may not hold references to this region. 
* @throws IllegalArgumentException */ - public HRegionInfo(final byte[] tableName, final byte[] startKey, final byte[] endKey, + public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, final boolean split) throws IllegalArgumentException { this(tableName, startKey, endKey, split, System.currentTimeMillis()); @@ -280,15 +279,15 @@ public class HRegionInfo implements Comparable { * @param regionid Region id to use. * @throws IllegalArgumentException */ - public HRegionInfo(final byte[] tableName, final byte[] startKey, + public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, final boolean split, final long regionid) throws IllegalArgumentException { super(); if (tableName == null) { - throw new IllegalArgumentException("tableName cannot be null"); + throw new IllegalArgumentException("TableName cannot be null"); } - this.tableName = tableName.clone(); + this.tableName = tableName; this.offLine = false; this.regionId = regionid; @@ -299,7 +298,7 @@ public class HRegionInfo implements Comparable { this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone(); this.startKey = startKey == null? HConstants.EMPTY_START_ROW: startKey.clone(); - this.tableName = tableName.clone(); + this.tableName = tableName; setHashCode(); } @@ -332,7 +331,7 @@ public class HRegionInfo implements Comparable { * (such that it contains its encoded name?). * @return Region name made of passed tableName, startKey and id */ - public static byte [] createRegionName(final byte [] tableName, + public static byte [] createRegionName(final TableName tableName, final byte [] startKey, final long regionid, boolean newFormat) { return createRegionName(tableName, startKey, Long.toString(regionid), newFormat); } @@ -346,7 +345,7 @@ public class HRegionInfo implements Comparable { * (such that it contains its encoded name?). * @return Region name made of passed tableName, startKey and id */ - public static byte [] createRegionName(final byte [] tableName, + public static byte [] createRegionName(final TableName tableName, final byte [] startKey, final String id, boolean newFormat) { return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat); } @@ -360,14 +359,14 @@ public class HRegionInfo implements Comparable { * (such that it contains its encoded name?). * @return Region name made of passed tableName, startKey and id */ - public static byte [] createRegionName(final byte [] tableName, + public static byte [] createRegionName(final TableName tableName, final byte [] startKey, final byte [] id, boolean newFormat) { - byte [] b = new byte [tableName.length + 2 + id.length + + byte [] b = new byte [tableName.getName().length + 2 + id.length + (startKey == null? 0: startKey.length) + (newFormat ? (MD5_HEX_LENGTH + 2) : 0)]; - int offset = tableName.length; - System.arraycopy(tableName, 0, b, 0, offset); + int offset = tableName.getName().length; + System.arraycopy(tableName.getName(), 0, b, 0, offset); b[offset++] = HConstants.DELIMITER; if (startKey != null && startKey.length > 0) { System.arraycopy(startKey, 0, b, offset, startKey.length); @@ -408,7 +407,7 @@ public class HRegionInfo implements Comparable { * @param regionName * @return Table name. 
*/ - public static byte [] getTableName(byte [] regionName) { + public static TableName getTableName(byte[] regionName) { int offset = -1; for (int i = 0; i < regionName.length; i++) { if (regionName[i] == HConstants.DELIMITER) { @@ -416,9 +415,9 @@ public class HRegionInfo implements Comparable { break; } } - byte [] tableName = new byte[offset]; - System.arraycopy(regionName, 0, tableName, 0, offset); - return tableName; + byte[] buff = new byte[offset]; + System.arraycopy(regionName, 0, buff, 0, offset); + return TableName.valueOf(buff); } /** @@ -446,7 +445,7 @@ public class HRegionInfo implements Comparable { } } if(offset == -1) throw new IOException("Invalid regionName format"); - byte [] tableName = new byte[offset]; + byte[] tableName = new byte[offset]; System.arraycopy(regionName, 0, tableName, 0, offset); offset = -1; for (int i = regionName.length - 1; i > 0; i--) { @@ -529,25 +528,14 @@ public class HRegionInfo implements Comparable { * Get current table name of the region * @return byte array of table name */ - public byte[] getTableName() { - if (tableName == null || tableName.length == 0) { + public TableName getTableName() { + if (tableName == null || tableName.getName().length == 0) { tableName = getTableName(getRegionName()); } return tableName; } /** - * Get current table name as string - * @return string representation of current table - */ - public String getTableNameAsString() { - if (tableNameAsString == null) { - tableNameAsString = Bytes.toString(tableName); - } - return tableNameAsString; - } - - /** * Returns true if the given inclusive range of rows is fully contained * by this region. For example, if the region is foo,a,g and this is * passed ["b","c"] or ["a","c"] it will return true, but if this is passed @@ -586,7 +574,7 @@ public class HRegionInfo implements Comparable { /** @return true if this region is a meta region */ public boolean isMetaRegion() { - return Bytes.equals(tableName, HRegionInfo.FIRST_META_REGIONINFO.getTableName()); + return tableName.equals(HRegionInfo.FIRST_META_REGIONINFO.getTableName()); } /** @@ -690,7 +678,7 @@ public class HRegionInfo implements Comparable { Bytes.writeByteArray(out, regionName); out.writeBoolean(split); Bytes.writeByteArray(out, startKey); - Bytes.writeByteArray(out, tableName); + Bytes.writeByteArray(out, tableName.getName()); out.writeInt(hashCode); } @@ -717,7 +705,7 @@ public class HRegionInfo implements Comparable { try { HTableDescriptor htd = new HTableDescriptor(); htd.readFields(in); - this.tableName = htd.getName(); + this.tableName = htd.getTableName(); } catch(EOFException eofe) { throw new IOException("HTD not found in input buffer", eofe); } @@ -730,7 +718,7 @@ public class HRegionInfo implements Comparable { this.regionNameStr = Bytes.toStringBinary(this.regionName); this.split = in.readBoolean(); this.startKey = Bytes.readByteArray(in); - this.tableName = Bytes.readByteArray(in); + this.tableName = TableName.valueOf(Bytes.readByteArray(in)); this.hashCode = in.readInt(); } else { throw new IOException("Non-migratable/unknown version=" + getVersion()); @@ -762,7 +750,7 @@ public class HRegionInfo implements Comparable { } // Are regions of same table? 
- int result = Bytes.compareTo(this.tableName, o.tableName); + int result = this.tableName.compareTo(o.tableName); if (result != 0) { return result; } @@ -829,7 +817,7 @@ public class HRegionInfo implements Comparable { public static RegionInfo convert(final HRegionInfo info) { if (info == null) return null; RegionInfo.Builder builder = RegionInfo.newBuilder(); - builder.setTableName(ByteString.copyFrom(info.getTableName())); + builder.setTableName(ProtobufUtil.toProtoTableName(info.getTableName())); builder.setRegionId(info.getRegionId()); if (info.getStartKey() != null) { builder.setStartKey(ByteString.copyFrom(info.getStartKey())); } @@ -846,12 +834,13 @@ public class HRegionInfo implements Comparable { * Convert a RegionInfo to a HRegionInfo * * @param proto the RegionInfo to convert - * @return the converted HRegionInfo + * @return the converted HRegionInfo */ public static HRegionInfo convert(final RegionInfo proto) { if (proto == null) return null; - byte [] tableName = proto.getTableName().toByteArray(); - if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { + TableName tableName = + ProtobufUtil.toTableName(proto.getTableName()); + if (tableName.equals(TableName.META_TABLE_NAME)) { return FIRST_META_REGIONINFO; } long regionId = proto.getRegionId(); @@ -867,7 +856,10 @@ public class HRegionInfo implements Comparable { if (proto.hasSplit()) { split = proto.getSplit(); } - HRegionInfo hri = new HRegionInfo(tableName, startKey, endKey, split, regionId); + HRegionInfo hri = new HRegionInfo( + tableName, + startKey, + endKey, split, regionId); if (proto.hasOffline()) { hri.setOffline(proto.getOffline()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 43f8732..09ae33d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -78,9 +78,7 @@ public class HTableDescriptor implements WritableComparable { */ private static final byte TABLE_DESCRIPTOR_VERSION = 7; - private byte [] name = HConstants.EMPTY_BYTE_ARRAY; - - private String nameAsString = ""; + private TableName name = null; /** * A map which holds the metadata information of the table. This metadata @@ -260,10 +258,8 @@ public class HTableDescriptor implements WritableComparable { * INTERNAL Private constructor used internally creating table descriptors for * catalog tables, .META. and -ROOT-. */ - protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families) { - this.name = name.clone(); - this.nameAsString = Bytes.toString(this.name); - setMetaFlags(name); + protected HTableDescriptor(final TableName name, HColumnDescriptor[] families) { + setName(name); for(HColumnDescriptor descriptor : families) { this.families.put(descriptor.getName(), descriptor); } } /** * INTERNAL Private constructor used internally creating table descriptors for * catalog tables, .META. and -ROOT-.
*/ - protected HTableDescriptor(final byte [] name, HColumnDescriptor[] families, + protected HTableDescriptor(final TableName name, HColumnDescriptor[] families, Map values) { - this.name = name.clone(); - this.nameAsString = Bytes.toString(this.name); - setMetaFlags(name); + setName(name); for(HColumnDescriptor descriptor : families) { this.families.put(descriptor.getName(), descriptor); } @@ -290,7 +284,6 @@ public class HTableDescriptor implements WritableComparable { /** * Default constructor which constructs an empty object. * For deserializing an HTableDescriptor instance only. - * @see #HTableDescriptor(byte[]) * @deprecated Used by Writables and Writables are going away. */ @Deprecated @@ -299,30 +292,33 @@ public class HTableDescriptor implements WritableComparable { } /** - * Construct a table descriptor specifying table name. + * Construct a table descriptor specifying a TableName object * @param name Table name. - * @throws IllegalArgumentException if passed a table name - * that is made of other than 'word' characters, underscore or period: i.e. - * [a-zA-Z_0-9.]. * @see HADOOP-1581 HBASE: Un-openable tablename bug */ - public HTableDescriptor(final String name) { - this(Bytes.toBytes(name)); + public HTableDescriptor(final TableName name) { + super(); + setName(name); } /** * Construct a table descriptor specifying a byte array table name - * @param name - Table name as a byte array. - * @throws IllegalArgumentException if passed a table name - * that is made of other than 'word' characters, underscore or period: i.e. - * [a-zA-Z_0-9-.]. + * @param name Table name. * @see HADOOP-1581 HBASE: Un-openable tablename bug */ - public HTableDescriptor(final byte [] name) { - super(); - setMetaFlags(this.name); - this.name = this.isMetaRegion()? name: isLegalTableName(name); - this.nameAsString = Bytes.toString(this.name); + @Deprecated + public HTableDescriptor(final byte[] name) { + this(TableName.valueOf(name)); + } + + /** + * Construct a table descriptor specifying a String table name + * @param name Table name. + * @see HADOOP-1581 HBASE: Un-openable tablename bug + */ + @Deprecated + public HTableDescriptor(final String name) { + this(TableName.valueOf(name)); } /** @@ -334,8 +330,7 @@ public class HTableDescriptor implements WritableComparable { */ public HTableDescriptor(final HTableDescriptor desc) { super(); - this.name = desc.name.clone(); - this.nameAsString = Bytes.toString(this.name); + setName(desc.name); setMetaFlags(this.name); for (HColumnDescriptor c: desc.families.values()) { this.families.put(c.getName(), new HColumnDescriptor(c)); @@ -356,10 +351,9 @@ public class HTableDescriptor implements WritableComparable { * Called by constructors. * @param name */ - private void setMetaFlags(final byte [] name) { - setRootRegion(Bytes.equals(name, HConstants.ROOT_TABLE_NAME)); + private void setMetaFlags(final TableName name) { setMetaRegion(isRootRegion() || - Bytes.equals(name, HConstants.META_TABLE_NAME)); + name.equals(TableName.META_TABLE_NAME)); } /** @@ -387,10 +381,10 @@ public class HTableDescriptor implements WritableComparable { } /** - * Checks if this table is either -ROOT- or .META. + * Checks if this table is .META. * region. * - * @return true if this is either a -ROOT- or .META. + * @return true if this table is .META. * region */ public boolean isMetaRegion() { @@ -436,56 +430,15 @@ public class HTableDescriptor implements WritableComparable { } /** - * Checks of the tableName being passed represents either - * -ROOT- or .META. 
+ * Checks if the tableName being passed is a system table + * + * - * @return true if a tablesName is either -ROOT- - * or .META. + * @return true if a tableName is a member of the system + * namespace (aka hbase) */ - public static boolean isMetaTable(final byte [] tableName) { - return Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME) || - Bytes.equals(tableName, HConstants.META_TABLE_NAME); - } - - // A non-capture group so that this can be embedded. - public static final String VALID_USER_TABLE_REGEX = "(?:[a-zA-Z_0-9][a-zA-Z_0-9.-]*)"; - - /** - * Check passed byte buffer, "tableName", is legal user-space table name. - * @return Returns passed tableName param - * @throws NullPointerException If passed tableName is null - * @throws IllegalArgumentException if passed a tableName - * that is made of other than 'word' characters or underscores: i.e. - * [a-zA-Z_0-9]. - */ - public static byte [] isLegalTableName(final byte [] tableName) { - if (tableName == null || tableName.length <= 0) { - throw new IllegalArgumentException("Name is null or empty"); - } - if (tableName[0] == '.' || tableName[0] == '-') { - throw new IllegalArgumentException("Illegal first character <" + tableName[0] + - "> at 0. User-space table names can only start with 'word " + - "characters': i.e. [a-zA-Z_0-9]: " + Bytes.toString(tableName)); - } - if (HConstants.CLUSTER_ID_FILE_NAME.equalsIgnoreCase(Bytes - .toString(tableName)) - || HConstants.SPLIT_LOGDIR_NAME.equalsIgnoreCase(Bytes - .toString(tableName)) - || HConstants.VERSION_FILE_NAME.equalsIgnoreCase(Bytes - .toString(tableName))) { - throw new IllegalArgumentException(Bytes.toString(tableName) - + " conflicted with system reserved words"); - } - for (int i = 0; i < tableName.length; i++) { - if (Character.isLetterOrDigit(tableName[i]) || tableName[i] == '_' || - tableName[i] == '-' || tableName[i] == '.') { - continue; - } - throw new IllegalArgumentException("Illegal character <" + tableName[i] + - "> at " + i + ". User-space table names can only contain " + - "'word characters': i.e. [a-zA-Z_0-9-.]: " + Bytes.toString(tableName)); - } - return tableName; + public static boolean isSystemTable(final TableName tableName) { + return tableName.getNamespaceAsString() + .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); } /** @@ -710,12 +663,21 @@ public class HTableDescriptor implements WritableComparable { } /** + * Get the name of the table + * + * @return TableName + */ + public TableName getTableName() { + return name; + } + + /** * Get the name of the table as a byte array.
* * @return name of table */ - public byte [] getName() { - return name; + public byte[] getName() { + return name.getName(); } /** @@ -724,7 +686,7 @@ public class HTableDescriptor implements WritableComparable { * @return name of table as a String */ public String getNameAsString() { - return this.nameAsString; + return name.getNameAsString(); } /** @@ -744,9 +706,14 @@ public class HTableDescriptor implements WritableComparable { * * @param name name of table */ + @Deprecated public void setName(byte[] name) { + setName(TableName.valueOf(name)); + } + + @Deprecated + public void setName(TableName name) { this.name = name; - this.nameAsString = Bytes.toString(this.name); setMetaFlags(this.name); } @@ -839,7 +806,7 @@ public class HTableDescriptor implements WritableComparable { @Override public String toString() { StringBuilder s = new StringBuilder(); - s.append('\'').append(Bytes.toString(name)).append('\''); + s.append('\'').append(Bytes.toString(name.getName())).append('\''); s.append(getValues(true)); for (HColumnDescriptor f : families.values()) { s.append(", ").append(f); @@ -853,7 +820,7 @@ public class HTableDescriptor implements WritableComparable { */ public String toStringCustomizedValues() { StringBuilder s = new StringBuilder(); - s.append('\'').append(Bytes.toString(name)).append('\''); + s.append('\'').append(Bytes.toString(name.getName())).append('\''); s.append(getValues(false)); for(HColumnDescriptor hcd : families.values()) { s.append(", ").append(hcd.toStringCustomizedValues()); @@ -978,7 +945,7 @@ public class HTableDescriptor implements WritableComparable { */ @Override public int hashCode() { - int result = Bytes.hashCode(this.name); + int result = this.name.hashCode(); result ^= Byte.valueOf(TABLE_DESCRIPTOR_VERSION).hashCode(); if (this.families != null && this.families.size() > 0) { for (HColumnDescriptor e: this.families.values()) { @@ -1002,8 +969,7 @@ public class HTableDescriptor implements WritableComparable { if (version < 3) throw new IOException("versions < 3 are not supported (and never existed!?)"); // version 3+ - name = Bytes.readByteArray(in); - nameAsString = Bytes.toString(this.name); + name = TableName.valueOf(Bytes.readByteArray(in)); setRootRegion(in.readBoolean()); setMetaRegion(in.readBoolean()); values.clear(); @@ -1046,8 +1012,8 @@ public class HTableDescriptor implements WritableComparable { @Deprecated @Override public void write(DataOutput out) throws IOException { - out.writeInt(TABLE_DESCRIPTOR_VERSION); - Bytes.writeByteArray(out, name); + out.writeInt(TABLE_DESCRIPTOR_VERSION); + Bytes.writeByteArray(out, name.toBytes()); out.writeBoolean(isRootRegion()); out.writeBoolean(isMetaRegion()); out.writeInt(values.size()); @@ -1080,7 +1046,7 @@ public class HTableDescriptor implements WritableComparable { */ @Override public int compareTo(final HTableDescriptor other) { - int result = Bytes.compareTo(this.name, other.name); + int result = this.name.compareTo(other.name); if (result == 0) { result = families.size() - other.families.size(); } @@ -1350,17 +1316,24 @@ public class HTableDescriptor implements WritableComparable { * Returns the {@link Path} object representing the table directory under * path rootdir * + * Deprecated use FSUtils.getTableDir() instead. 
+ * + * @param rootdir qualified path of HBase root directory * @param tableName name of table * @return {@link Path} for table */ + @Deprecated public static Path getTableDir(Path rootdir, final byte [] tableName) { - return new Path(rootdir, Bytes.toString(tableName)); + //This is bad; we have to mirror code from FSUtils.getTableDir since + //there is no module dependency between hbase-client and hbase-server + TableName name = TableName.valueOf(tableName); + return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR, + new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString())))); } /** Table descriptor for -ROOT- catalog table */ public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor( - HConstants.ROOT_TABLE_NAME, + TableName.ROOT_TABLE_NAME, new HColumnDescriptor[] { new HColumnDescriptor(HConstants.CATALOG_FAMILY) // Ten is arbitrary number. Keep versions to help debugging. .setMaxVersions(10) @@ -1373,7 +1346,8 @@ public class HTableDescriptor implements WritableComparable { /** Table descriptor for .META. catalog table */ public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor( - HConstants.META_TABLE_NAME, new HColumnDescriptor[] { + TableName.META_TABLE_NAME, + new HColumnDescriptor[] { new HColumnDescriptor(HConstants.CATALOG_FAMILY) // Ten is arbitrary number. Keep versions to help debugging. .setMaxVersions(10) @@ -1395,6 +1369,21 @@ public class HTableDescriptor implements WritableComparable { } } + public final static String NAMESPACE_FAMILY_INFO = "info"; + public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO); + public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d"); + + /** Table descriptor for namespace table */ + public static final HTableDescriptor NAMESPACE_TABLEDESC = new HTableDescriptor( + TableName.NAMESPACE_TABLE_NAME, + new HColumnDescriptor[] { + new HColumnDescriptor(NAMESPACE_FAMILY_INFO) + // Ten is arbitrary number. Keep versions to help debugging.
+ .setMaxVersions(10) + .setInMemory(true) + .setBlocksize(8 * 1024) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL) + }); @Deprecated public void setOwner(User owner) { @@ -1458,7 +1447,7 @@ public class HTableDescriptor implements WritableComparable { */ public TableSchema convert() { TableSchema.Builder builder = TableSchema.newBuilder(); - builder.setName(ByteString.copyFrom(getName())); + builder.setTableName(ProtobufUtil.toProtoTableName(getTableName())); for (Map.Entry e: this.values.entrySet()) { BytesBytesPair.Builder aBuilder = BytesBytesPair.newBuilder(); aBuilder.setFirst(ByteString.copyFrom(e.getKey().get())); @@ -1488,7 +1477,9 @@ public class HTableDescriptor implements WritableComparable { for (ColumnFamilySchema cfs: list) { hcds[index++] = HColumnDescriptor.convert(cfs); } - HTableDescriptor htd = new HTableDescriptor(ts.getName().toByteArray(), hcds); + HTableDescriptor htd = new HTableDescriptor( + ProtobufUtil.toTableName(ts.getTableName()), + hcds); for (BytesBytesPair a: ts.getAttributesList()) { htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java index 4189acc..acd0e1e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java @@ -16,6 +16,7 @@ package org.apache.hadoop.hbase; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.TableName; /** * Thrown when a table exists but should not @@ -37,4 +38,8 @@ public class TableExistsException extends DoNotRetryIOException { public TableExistsException(String s) { super(s); } + + public TableExistsException(TableName t) { + this(t.getNameAsString()); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java index 5e4546b..f3ec12e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; /** @@ -48,4 +49,11 @@ public class TableNotDisabledException extends DoNotRetryIOException { public TableNotDisabledException(byte[] tableName) { this(Bytes.toString(tableName)); } + + /** + * @param tableName Name of table that is not disabled + */ + public TableNotDisabledException(TableName tableName) { + this(tableName.getNameAsString()); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java index a1ee149..c28ec77 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.util.Bytes; @@ -46,6 +47,13 @@ public class TableNotEnabledException extends DoNotRetryIOException { /** * @param tableName Name of table that is not enabled */ + public TableNotEnabledException(TableName tableName) { + this(tableName.getNameAsString()); + } + + /** + * @param tableName Name of table that is not enabled + */ public TableNotEnabledException(byte[] tableName) { this(Bytes.toString(tableName)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java index d4ca300..1c4d829 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java @@ -20,6 +20,8 @@ package org.apache.hadoop.hbase; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.util.Bytes; /** Thrown when a table can not be located */ @InterfaceAudience.Public @@ -36,4 +38,12 @@ public class TableNotFoundException extends DoNotRetryIOException { public TableNotFoundException(String s) { super(s); } + + public TableNotFoundException(byte[] tableName) { + super(Bytes.toString(tableName)); + } + + public TableNotFoundException(TableName tableName) { + super(tableName.getNameAsString()); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java index 2437198..b0e69fb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java @@ -21,6 +21,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; @@ -72,7 +73,7 @@ public class MetaReader { * @throws IOException */ public static Map fullScan( - CatalogTracker catalogTracker, final Set disabledTables) + CatalogTracker catalogTracker, final Set disabledTables) throws IOException { return fullScan(catalogTracker, disabledTables, false); } @@ -90,7 +91,7 @@ public class MetaReader { * @throws IOException */ public static Map fullScan( - CatalogTracker catalogTracker, final Set disabledTables, + CatalogTracker catalogTracker, final Set disabledTables, final boolean excludeOfflinedSplitParents) throws IOException { final Map regions = @@ -102,9 +103,9 @@ public class MetaReader { Pair region = HRegionInfo.getHRegionInfoAndServerName(r); HRegionInfo hri = region.getFirst(); if (hri == null) return true; - if (hri.getTableNameAsString() == null) return true; + if (hri.getTableName() == null) return true; if (disabledTables.contains( - hri.getTableNameAsString())) return true; + hri.getTableName())) return true; // Are we to include split parents in the list? 
if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; regions.put(hri, region.getSecond()); @@ -159,7 +160,7 @@ public class MetaReader { * @throws IOException */ private static HTable getHTable(final CatalogTracker catalogTracker, - final byte [] tableName) + final TableName tableName) throws IOException { // Passing the CatalogTracker's connection configuration ensures this // HTable instance uses the CatalogTracker's connection. @@ -187,7 +188,7 @@ public class MetaReader { */ static HTable getMetaHTable(final CatalogTracker ct) throws IOException { - return getHTable(ct, HConstants.META_TABLE_NAME); + return getHTable(ct, TableName.META_TABLE_NAME); } /** @@ -274,13 +275,12 @@ public class MetaReader { * @throws IOException */ public static boolean tableExists(CatalogTracker catalogTracker, - String tableName) + final TableName tableName) throws IOException { - if (tableName.equals(HTableDescriptor.META_TABLEDESC.getNameAsString())) { + if (tableName.equals(HTableDescriptor.META_TABLEDESC.getTableName())) { // Catalog tables always exist. return true; } - final byte [] tableNameBytes = Bytes.toBytes(tableName); // Make a version of ResultCollectingVisitor that only collects the first CollectingVisitor visitor = new CollectingVisitor() { private HRegionInfo current = null; @@ -293,7 +293,7 @@ public class MetaReader { LOG.warn("No serialized HRegionInfo in " + r); return true; } - if (!isInsideTable(this.current, tableNameBytes)) return false; + if (!isInsideTable(this.current, tableName)) return false; // Else call super and add this Result to the collection. super.visit(r); // Stop collecting regions from table after we get one. @@ -306,7 +306,7 @@ public class MetaReader { this.results.add(this.current); } }; - fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableNameBytes)); + fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableName)); // If visitor has results >= 1 then table exists. return visitor.getResults().size() >= 1; } @@ -319,7 +319,7 @@ public class MetaReader { * @throws IOException */ public static List getTableRegions(CatalogTracker catalogTracker, - byte [] tableName) + TableName tableName) throws IOException { return getTableRegions(catalogTracker, tableName, false); } @@ -334,7 +334,7 @@ public class MetaReader { * @throws IOException */ public static List getTableRegions(CatalogTracker catalogTracker, - byte [] tableName, final boolean excludeOfflinedSplitParents) + TableName tableName, final boolean excludeOfflinedSplitParents) throws IOException { List> result = null; try { @@ -361,8 +361,8 @@ public class MetaReader { * @return True if current tablename is equal to * tableName */ - static boolean isInsideTable(final HRegionInfo current, final byte [] tableName) { - return Bytes.equals(tableName, current.getTableName()); + static boolean isInsideTable(final HRegionInfo current, final TableName tableName) { + return tableName.equals(current.getTableName()); } /** @@ -370,9 +370,9 @@ public class MetaReader { * @return Place to start Scan in .META. 
when passed a * tableName; returns <tableName&rt; <,&rt; <,&rt; */ - static byte [] getTableStartRowForMeta(final byte [] tableName) { - byte [] startRow = new byte[tableName.length + 2]; - System.arraycopy(tableName, 0, startRow, 0, tableName.length); + static byte [] getTableStartRowForMeta(TableName tableName) { + byte [] startRow = new byte[tableName.getName().length + 2]; + System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length); startRow[startRow.length - 2] = HConstants.DELIMITER; startRow[startRow.length - 1] = HConstants.DELIMITER; return startRow; @@ -387,8 +387,8 @@ public class MetaReader { * @param tableName bytes of table's name * @return configured Scan object */ - public static Scan getScanForTableName(byte[] tableName) { - String strName = Bytes.toString(tableName); + public static Scan getScanForTableName(TableName tableName) { + String strName = tableName.getNameAsString(); // Start key is just the table name with delimiters byte[] startKey = Bytes.toBytes(strName + ",,"); // Stop key appends the smallest possible char to the table name @@ -407,9 +407,9 @@ public class MetaReader { * @throws InterruptedException */ public static List> - getTableRegionsAndLocations(CatalogTracker catalogTracker, String tableName) + getTableRegionsAndLocations(CatalogTracker catalogTracker, TableName tableName) throws IOException, InterruptedException { - return getTableRegionsAndLocations(catalogTracker, Bytes.toBytes(tableName), + return getTableRegionsAndLocations(catalogTracker, tableName, true); } @@ -422,9 +422,9 @@ public class MetaReader { */ public static List> getTableRegionsAndLocations(final CatalogTracker catalogTracker, - final byte [] tableName, final boolean excludeOfflinedSplitParents) + final TableName tableName, final boolean excludeOfflinedSplitParents) throws IOException, InterruptedException { - if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { + if (tableName.equals(TableName.META_TABLE_NAME)) { // If meta, do a bit of special handling. 
ServerName serverName = catalogTracker.getMetaLocation(); List> list = diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index 844bb5d..d70a65d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -23,6 +23,9 @@ package org.apache.hadoop.hbase.client; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; @@ -85,7 +88,7 @@ import java.util.concurrent.atomic.AtomicLong; class AsyncProcess { private static final Log LOG = LogFactory.getLog(AsyncProcess.class); protected final HConnection hConnection; - protected final byte[] tableName; + protected final TableName tableName; protected final ExecutorService pool; protected final AsyncProcessCallback callback; protected final BatchErrors errors = new BatchErrors(); @@ -167,7 +170,7 @@ class AsyncProcess { } } - public AsyncProcess(HConnection hc, byte[] tableName, ExecutorService pool, + public AsyncProcess(HConnection hc, TableName tableName, ExecutorService pool, AsyncProcessCallback callback, Configuration conf, RpcRetryingCallerFactory rpcCaller) { this.hConnection = hc; @@ -288,7 +291,7 @@ class AsyncProcess { loc = hConnection.locateRegion(this.tableName, row.getRow()); if (loc == null) { locationException = new IOException("No location found, aborting submit for" + - " tableName=" + Bytes.toString(tableName) + + " tableName=" + tableName + " rowkey=" + Arrays.toString(row.getRow())); } } catch (IOException e) { @@ -530,7 +533,7 @@ class AsyncProcess { if (toReplay.isEmpty()) { LOG.warn("Attempt #" + numAttempt + "/" + numTries + " failed for all (" + initialActions.size() + ") operations on server " + location.getServerName() + - " NOT resubmitting, tableName=" + Bytes.toString(tableName) + ", location=" + location); + " NOT resubmitting, tableName=" + tableName + ", location=" + location); } else { submit(initialActions, toReplay, numAttempt, true, errorsByServer); } @@ -553,7 +556,7 @@ class AsyncProcess { if (responses == null) { LOG.info("Attempt #" + numAttempt + "/" + numTries + " failed for all operations" + " on server " + location.getServerName() + " , trying to resubmit," + - " tableName=" + Bytes.toString(tableName) + ", location=" + location); + " tableName=" + tableName + ", location=" + location); resubmitAll(initialActions, rsActions, location, numAttempt + 1, null, errorsByServer); return; } @@ -614,7 +617,7 @@ class AsyncProcess { // logs as errors are to be expected wehn region moves, split and so on LOG.debug("Attempt #" + numAttempt + "/" + numTries + " failed for " + failureCount + " operations on server " + location.getServerName() + ", resubmitting " + - toReplay.size() + ", tableName=" + Bytes.toString(tableName) + ", location=" + + toReplay.size() + ", tableName=" + tableName + ", location=" + location + ", last exception was: " + throwable + " - sleeping " + backOffTime + " ms."); } @@ -622,7 +625,7 @@ class AsyncProcess { Thread.sleep(backOffTime); } catch (InterruptedException e) { LOG.warn("Not sent: " + toReplay.size() + - " 
operations, tableName=" + Bytes.toString(tableName) + ", location=" + location, e); + " operations, tableName=" + tableName + ", location=" + location, e); Thread.interrupted(); return; } @@ -631,7 +634,7 @@ class AsyncProcess { } else if (failureCount != 0) { LOG.warn("Attempt #" + numAttempt + "/" + numTries + " failed for " + failureCount + " operations on server " + location.getServerName() + " NOT resubmitting." + - ", tableName=" + Bytes.toString(tableName) + ", location=" + location); + ", tableName=" + tableName + ", location=" + location); } } @@ -648,7 +651,7 @@ class AsyncProcess { } catch (InterruptedException e) { throw new InterruptedIOException("Interrupted." + " currentNumberOfTask=" + currentNumberOfTask + - ", tableName=" + Bytes.toString(tableName) + ", tasksDone=" + tasksDone.get()); + ", tableName=" + tableName + ", tasksDone=" + tasksDone.get()); } } } @@ -666,7 +669,7 @@ class AsyncProcess { lastLog = now; LOG.info(": Waiting for the global number of running tasks to be equals or less than " + max + ", tasksSent=" + tasksSent.get() + ", tasksDone=" + tasksDone.get() + - ", currentTasksDone=" + currentTasksDone + ", tableName=" + Bytes.toString(tableName)); + ", currentTasksDone=" + currentTasksDone + ", tableName=" + tableName); } waitForNextTaskDone(currentTasksDone); currentTasksDone = this.tasksDone.get(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java index 4fdc6ce..b6b6601 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java @@ -26,6 +26,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -63,7 +64,7 @@ public class ClientScanner extends AbstractClientScanner { private ScanMetrics scanMetrics = null; private final long maxScannerResultSize; private final HConnection connection; - private final byte[] tableName; + private final TableName tableName; private final int scannerTimeout; private boolean scanMetricsPublished = false; private RpcRetryingCaller caller; @@ -79,7 +80,7 @@ public class ClientScanner extends AbstractClientScanner { * @throws IOException */ public ClientScanner(final Configuration conf, final Scan scan, - final byte[] tableName) throws IOException { + final TableName tableName) throws IOException { this(conf, scan, tableName, HConnectionManager.getConnection(conf)); } @@ -94,7 +95,7 @@ public class ClientScanner extends AbstractClientScanner { * @param connection Connection identifying the cluster * @throws IOException */ - public ClientScanner(final Configuration conf, final Scan scan, final byte[] tableName, + public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName, HConnection connection) throws IOException { this(conf, scan, tableName, connection, new RpcRetryingCallerFactory(conf)); } @@ -108,10 +109,10 @@ public class ClientScanner extends AbstractClientScanner { * @param connection Connection identifying the cluster * @throws IOException */ - public ClientScanner(final Configuration conf, final Scan scan, final 
byte[] tableName, + public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName, HConnection connection, RpcRetryingCallerFactory rpcFactory) throws IOException { if (LOG.isTraceEnabled()) { - LOG.trace("Scan table=" + Bytes.toString(tableName) + LOG.trace("Scan table=" + tableName + ", startRow=" + Bytes.toStringBinary(scan.getStartRow())); } this.scan = scan; @@ -156,7 +157,7 @@ public class ClientScanner extends AbstractClientScanner { return this.connection; } - protected byte[] getTableName() { + protected TableName getTableName() { return this.tableName; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index a308cad..49065dd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -22,6 +22,7 @@ import java.io.Closeable; import java.io.IOException; import java.io.InterruptedIOException; import java.net.SocketTimeoutException; +import java.util.ArrayList; import java.util.Arrays; import java.util.LinkedList; import java.util.List; @@ -36,6 +37,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -43,6 +45,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionException; @@ -83,7 +86,10 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; @@ -248,7 +254,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @return True if table exists already. * @throws IOException */ - public boolean tableExists(final String tableName) + public boolean tableExists(final TableName tableName) throws IOException { boolean b = false; CatalogTracker ct = getCatalogTracker(); @@ -260,14 +266,14 @@ public class HBaseAdmin implements Abortable, Closeable { return b; } - /** - * @param tableName Table to check. - * @return True if table exists already. 
- * @throws IOException - */ - public boolean tableExists(final byte [] tableName) + public boolean tableExists(final byte[] tableName) + throws IOException { + return tableExists(TableName.valueOf(tableName)); + } + + public boolean tableExists(final String tableName) throws IOException { - return tableExists(Bytes.toString(tableName)); + return tableExists(TableName.valueOf(tableName)); } /** @@ -296,7 +302,7 @@ public class HBaseAdmin implements Abortable, Closeable { List matched = new LinkedList(); HTableDescriptor[] tables = listTables(); for (HTableDescriptor table : tables) { - if (pattern.matcher(table.getNameAsString()).matches()) { + if (pattern.matcher(table.getTableName().getNameAsString()).matches()) { matched.add(table); } } @@ -323,11 +329,16 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws TableNotFoundException * @throws IOException if a remote or network exception occurs */ - public HTableDescriptor getTableDescriptor(final byte [] tableName) + public HTableDescriptor getTableDescriptor(final TableName tableName) throws TableNotFoundException, IOException { return this.connection.getHTableDescriptor(tableName); } + public HTableDescriptor getTableDescriptor(final byte[] tableName) + throws TableNotFoundException, IOException { + return getTableDescriptor(TableName.valueOf(tableName)); + } + private long getPauseTime(int tries) { int triesCount = tries; if (triesCount >= HConstants.RETRY_BACKOFF.length) { @@ -381,7 +392,6 @@ public class HBaseAdmin implements Abortable, Closeable { public void createTable(HTableDescriptor desc, byte [] startKey, byte [] endKey, int numRegions) throws IOException { - HTableDescriptor.isLegalTableName(desc.getName()); if(numRegions < 3) { throw new IllegalArgumentException("Must create at least three regions"); } else if(Bytes.compareTo(startKey, endKey) >= 0) { @@ -417,11 +427,10 @@ public class HBaseAdmin implements Abortable, Closeable { */ public void createTable(final HTableDescriptor desc, byte [][] splitKeys) throws IOException { - HTableDescriptor.isLegalTableName(desc.getName()); try { createTableAsync(desc, splitKeys); } catch (SocketTimeoutException ste) { - LOG.warn("Creating " + desc.getNameAsString() + " took too long", ste); + LOG.warn("Creating " + desc.getTableName() + " took too long", ste); } int numRegs = splitKeys == null ? 
1 : splitKeys.length + 1; int prevRegCount = 0; @@ -439,7 +448,7 @@ public class HBaseAdmin implements Abortable, Closeable { LOG.warn("No serialized HRegionInfo in " + rowResult); return true; } - if (!(Bytes.equals(info.getTableName(), desc.getName()))) { + if (!info.getTableName().equals(desc.getTableName())) { return false; } ServerName serverName = HRegionInfo.getServerName(rowResult); @@ -451,7 +460,7 @@ public class HBaseAdmin implements Abortable, Closeable { return true; } }; - MetaScanner.metaScan(conf, connection, visitor, desc.getName()); + MetaScanner.metaScan(conf, connection, visitor, desc.getTableName()); if (actualRegCount.get() != numRegs) { if (tries == this.numRetries * this.retryLongerMultiplier - 1) { throw new RegionOfflineException("Only " + actualRegCount.get() + @@ -472,7 +481,7 @@ public class HBaseAdmin implements Abortable, Closeable { doneWithMetaScan = true; tries = -1; } - } else if (isTableEnabled(desc.getName())) { + } else if (isTableEnabled(desc.getTableName())) { return; } else { try { // Sleep @@ -485,7 +494,7 @@ public class HBaseAdmin implements Abortable, Closeable { } throw new TableNotEnabledException( "Retries exhausted while still waiting for table: " - + desc.getNameAsString() + " to be enabled"); + + desc.getTableName() + " to be enabled"); } /** @@ -507,7 +516,9 @@ public class HBaseAdmin implements Abortable, Closeable { public void createTableAsync( final HTableDescriptor desc, final byte [][] splitKeys) throws IOException { - HTableDescriptor.isLegalTableName(desc.getName()); + if(desc.getTableName() == null) { + throw new IllegalArgumentException("TableName cannot be null"); + } if(splitKeys != null && splitKeys.length > 0) { Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR); // Verify there are no duplicate split keys @@ -536,15 +547,12 @@ public class HBaseAdmin implements Abortable, Closeable { }); } - /** - * Deletes a table. - * Synchronous operation. 
- * - * @param tableName name of table to delete - * @throws IOException if a remote or network exception occurs - */ public void deleteTable(final String tableName) throws IOException { - deleteTable(Bytes.toBytes(tableName)); + deleteTable(TableName.valueOf(tableName)); + } + + public void deleteTable(final byte[] tableName) throws IOException { + deleteTable(TableName.valueOf(tableName)); } /** @@ -554,8 +562,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs */ - public void deleteTable(final byte [] tableName) throws IOException { - HTableDescriptor.isLegalTableName(tableName); + public void deleteTable(final TableName tableName) throws IOException { HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName); boolean tableExists = true; @@ -626,11 +633,11 @@ public class HBaseAdmin implements Abortable, Closeable { if (tableExists) { throw new IOException("Retries exhausted, it took too long to wait"+ - " for the table " + Bytes.toString(tableName) + " to be deleted."); + " for the table " + tableName + " to be deleted."); } // Delete cached information to prevent clients from using old locations this.connection.clearRegionCache(tableName); - LOG.info("Deleted " + Bytes.toString(tableName)); + LOG.info("Deleted " + tableName); } /** @@ -665,9 +672,9 @@ public class HBaseAdmin implements Abortable, Closeable { List failed = new LinkedList(); for (HTableDescriptor table : listTables(pattern)) { try { - deleteTable(table.getName()); + deleteTable(table.getTableName()); } catch (IOException ex) { - LOG.info("Failed to delete table " + table.getNameAsString(), ex); + LOG.info("Failed to delete table " + table.getTableName(), ex); failed.add(table); } } @@ -675,11 +682,6 @@ public class HBaseAdmin implements Abortable, Closeable { } - public void enableTable(final String tableName) - throws IOException { - enableTable(Bytes.toBytes(tableName)); - } - /** * Enable a table. May timeout. Use {@link #enableTableAsync(byte[])} * and {@link #isTableEnabled(byte[])} instead. @@ -693,14 +695,24 @@ public class HBaseAdmin implements Abortable, Closeable { * @see #disableTable(byte[]) * @see #enableTableAsync(byte[]) */ - public void enableTable(final byte [] tableName) + public void enableTable(final TableName tableName) throws IOException { enableTableAsync(tableName); // Wait until all regions are enabled waitUntilTableIsEnabled(tableName); - LOG.info("Enabled table " + Bytes.toString(tableName)); + LOG.info("Enabled table " + tableName); + } + + public void enableTable(final byte[] tableName) + throws IOException { + enableTable(TableName.valueOf(tableName)); + } + + public void enableTable(final String tableName) + throws IOException { + enableTable(TableName.valueOf(tableName)); } /** @@ -710,7 +722,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws IOException if a remote or network exception occurs or * table is not enabled after the retries period. 
*/ - private void waitUntilTableIsEnabled(final byte[] tableName) throws IOException { + private void waitUntilTableIsEnabled(final TableName tableName) throws IOException { boolean enabled = false; long start = EnvironmentEdgeManager.currentTimeMillis(); for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) { @@ -721,7 +733,7 @@ public class HBaseAdmin implements Abortable, Closeable { long sleep = getPauseTime(tries); if (LOG.isDebugEnabled()) { LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " + - "enabled in " + Bytes.toString(tableName)); + "enabled in " + tableName); } try { Thread.sleep(sleep); @@ -734,16 +746,11 @@ public class HBaseAdmin implements Abortable, Closeable { } if (!enabled) { long msec = EnvironmentEdgeManager.currentTimeMillis() - start; - throw new IOException("Table '" + Bytes.toString(tableName) + + throw new IOException("Table '" + tableName + "' not yet enabled, after " + msec + "ms."); } } - public void enableTableAsync(final String tableName) - throws IOException { - enableTableAsync(Bytes.toBytes(tableName)); - } - /** * Brings a table on-line (enables it). Method returns immediately though * enable of table may take some time to complete, especially if the table @@ -754,13 +761,13 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws IOException * @since 0.90.0 */ - public void enableTableAsync(final byte [] tableName) + public void enableTableAsync(final TableName tableName) throws IOException { - HTableDescriptor.isLegalTableName(tableName); + TableName.isLegalFullyQualifiedTableName(tableName.getName()); executeCallable(new MasterAdminCallable(getConnection()) { @Override public Void call() throws ServiceException { - LOG.info("Started enable of " + Bytes.toString(tableName)); + LOG.info("Started enable of " + tableName); EnableTableRequest req = RequestConverter.buildEnableTableRequest(tableName); masterAdmin.enableTable(null,req); return null; @@ -768,6 +775,16 @@ public class HBaseAdmin implements Abortable, Closeable { }); } + public void enableTableAsync(final byte[] tableName) + throws IOException { + enableTable(TableName.valueOf(tableName)); + } + + public void enableTableAsync(final String tableName) + throws IOException { + enableTableAsync(TableName.valueOf(tableName)); + } + /** * Enable tables matching the passed in pattern and wait on completion. * @@ -797,11 +814,11 @@ public class HBaseAdmin implements Abortable, Closeable { public HTableDescriptor[] enableTables(Pattern pattern) throws IOException { List failed = new LinkedList(); for (HTableDescriptor table : listTables(pattern)) { - if (isTableDisabled(table.getName())) { + if (isTableDisabled(table.getTableName())) { try { - enableTable(table.getName()); + enableTable(table.getTableName()); } catch (IOException ex) { - LOG.info("Failed to enable table " + table.getNameAsString(), ex); + LOG.info("Failed to enable table " + table.getTableName(), ex); failed.add(table); } } @@ -809,10 +826,6 @@ public class HBaseAdmin implements Abortable, Closeable { return failed.toArray(new HTableDescriptor[failed.size()]); } - public void disableTableAsync(final String tableName) throws IOException { - disableTableAsync(Bytes.toBytes(tableName)); - } - /** * Starts the disable of a table. If it is being served, the master * will tell the servers to stop serving it. This method returns immediately. 
@@ -826,12 +839,12 @@ public class HBaseAdmin implements Abortable, Closeable { * @see #isTableEnabled(byte[]) * @since 0.90.0 */ - public void disableTableAsync(final byte [] tableName) throws IOException { - HTableDescriptor.isLegalTableName(tableName); + public void disableTableAsync(final TableName tableName) throws IOException { + TableName.isLegalFullyQualifiedTableName(tableName.getName()); executeCallable(new MasterAdminCallable(getConnection()) { @Override public Void call() throws ServiceException { - LOG.info("Started disable of " + Bytes.toString(tableName)); + LOG.info("Started disable of " + tableName); DisableTableRequest req = RequestConverter.buildDisableTableRequest(tableName); masterAdmin.disableTable(null,req); return null; @@ -839,9 +852,12 @@ public class HBaseAdmin implements Abortable, Closeable { }); } - public void disableTable(final String tableName) - throws IOException { - disableTable(Bytes.toBytes(tableName)); + public void disableTableAsync(final byte[] tableName) throws IOException { + disableTableAsync(TableName.valueOf(tableName)); + } + + public void disableTableAsync(final String tableName) throws IOException { + disableTableAsync(TableName.valueOf(tableName)); } /** @@ -855,7 +871,7 @@ public class HBaseAdmin implements Abortable, Closeable { * TableNotFoundException means the table doesn't exist. * TableNotEnabledException means the table isn't in enabled state. */ - public void disableTable(final byte [] tableName) + public void disableTable(final TableName tableName) throws IOException { disableTableAsync(tableName); // Wait until table is disabled @@ -868,7 +884,7 @@ public class HBaseAdmin implements Abortable, Closeable { long sleep = getPauseTime(tries); if (LOG.isDebugEnabled()) { LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " + - "disabled in " + Bytes.toString(tableName)); + "disabled in " + tableName); } try { Thread.sleep(sleep); @@ -881,9 +897,19 @@ public class HBaseAdmin implements Abortable, Closeable { } if (!disabled) { throw new RegionException("Retries exhausted, it took too long to wait"+ - " for the table " + Bytes.toString(tableName) + " to be disabled."); + " for the table " + tableName + " to be disabled."); } - LOG.info("Disabled " + Bytes.toString(tableName)); + LOG.info("Disabled " + tableName); + } + + public void disableTable(final byte[] tableName) + throws IOException { + disableTable(TableName.valueOf(tableName)); + } + + public void disableTable(final String tableName) + throws IOException { + disableTable(TableName.valueOf(tableName)); } /** @@ -917,11 +943,11 @@ public class HBaseAdmin implements Abortable, Closeable { public HTableDescriptor[] disableTables(Pattern pattern) throws IOException { List failed = new LinkedList(); for (HTableDescriptor table : listTables(pattern)) { - if (isTableEnabled(table.getName())) { + if (isTableEnabled(table.getTableName())) { try { - disableTable(table.getName()); + disableTable(table.getTableName()); } catch (IOException ex) { - LOG.info("Failed to disable table " + table.getNameAsString(), ex); + LOG.info("Failed to disable table " + table.getTableName(), ex); failed.add(table); } } @@ -934,74 +960,52 @@ public class HBaseAdmin implements Abortable, Closeable { * @return true if table is on-line * @throws IOException if a remote or network exception occurs */ - public boolean isTableEnabled(String tableName) throws IOException { - return isTableEnabled(Bytes.toBytes(tableName)); + public boolean isTableEnabled(TableName tableName) throws IOException { + 
return connection.isTableEnabled(tableName); } - /** - * @param tableName name of table to check - * @return true if table is on-line - * @throws IOException if a remote or network exception occurs - */ + public boolean isTableEnabled(byte[] tableName) throws IOException { - if (!HTableDescriptor.isMetaTable(tableName)) { - HTableDescriptor.isLegalTableName(tableName); - } - return connection.isTableEnabled(tableName); + return isTableEnabled(TableName.valueOf(tableName)); } - /** - * @param tableName name of table to check - * @return true if table is off-line - * @throws IOException if a remote or network exception occurs - */ - public boolean isTableDisabled(final String tableName) throws IOException { - return isTableDisabled(Bytes.toBytes(tableName)); + public boolean isTableEnabled(String tableName) throws IOException { + return isTableEnabled(TableName.valueOf(tableName)); } + + /** * @param tableName name of table to check * @return true if table is off-line * @throws IOException if a remote or network exception occurs */ - public boolean isTableDisabled(byte[] tableName) throws IOException { - if (!HTableDescriptor.isMetaTable(tableName)) { - HTableDescriptor.isLegalTableName(tableName); - } + public boolean isTableDisabled(TableName tableName) throws IOException { return connection.isTableDisabled(tableName); } + public boolean isTableDisabled(byte[] tableName) throws IOException { + return isTableDisabled(TableName.valueOf(tableName)); + } + + public boolean isTableDisabled(String tableName) throws IOException { + return isTableDisabled(TableName.valueOf(tableName)); + } + /** * @param tableName name of table to check * @return true if all regions of the table are available * @throws IOException if a remote or network exception occurs */ - public boolean isTableAvailable(byte[] tableName) throws IOException { + public boolean isTableAvailable(TableName tableName) throws IOException { return connection.isTableAvailable(tableName); } - /** - * @param tableName name of table to check - * @return true if all regions of the table are available - * @throws IOException if a remote or network exception occurs - */ - public boolean isTableAvailable(String tableName) throws IOException { - return connection.isTableAvailable(Bytes.toBytes(tableName)); + public boolean isTableAvailable(byte[] tableName) throws IOException { + return isTableAvailable(TableName.valueOf(tableName)); } - - /** - * Use this api to check if the table has been created with the specified number of - * splitkeys which was used while creating the given table. - * Note : If this api is used after a table's region gets splitted, the api may return - * false. 
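The three state checks above differ only in how the name is supplied; a small illustrative sketch (the helper class and table name are hypothetical):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class TableStateSketch {
  static void printState(HBaseAdmin admin) throws Exception {
    TableName name = TableName.valueOf("demo");
    System.out.println("enabled:   " + admin.isTableEnabled(name));
    System.out.println("disabled:  " + admin.isTableDisabled(name));
    System.out.println("available: " + admin.isTableAvailable(name));   // all regions assigned
  }
}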
- * @param tableName - * name of table to check - * @param splitKeys - * keys to check if the table has been created with all split keys - * @throws IOException - * if a remote or network excpetion occurs - */ - public boolean isTableAvailable(String tableName, byte[][] splitKeys) throws IOException { - return connection.isTableAvailable(Bytes.toBytes(tableName), splitKeys); + + public boolean isTableAvailable(String tableName) throws IOException { + return isTableAvailable(TableName.valueOf(tableName)); } /** @@ -1016,25 +1020,34 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws IOException * if a remote or network excpetion occurs */ - public boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws IOException { + public boolean isTableAvailable(TableName tableName, + byte[][] splitKeys) throws IOException { return connection.isTableAvailable(tableName, splitKeys); } + public boolean isTableAvailable(byte[] tableName, + byte[][] splitKeys) throws IOException { + return isTableAvailable(TableName.valueOf(tableName), splitKeys); + } + + public boolean isTableAvailable(String tableName, + byte[][] splitKeys) throws IOException { + return isTableAvailable(TableName.valueOf(tableName), splitKeys); + } + /** * Get the status of alter command - indicates how many regions have received * the updated schema Asynchronous operation. * - * @param tableName - * name of the table to get the status of + * @param tableName TableName instance * @return Pair indicating the number of regions updated Pair.getFirst() is the * regions that are yet to be updated Pair.getSecond() is the total number * of regions of the table * @throws IOException * if a remote or network exception occurs */ - public Pair getAlterStatus(final byte[] tableName) + public Pair getAlterStatus(final TableName tableName) throws IOException { - HTableDescriptor.isLegalTableName(tableName); return executeCallable(new MasterMonitorCallable>(getConnection()) { @Override public Pair call() throws ServiceException { @@ -1049,6 +1062,37 @@ public class HBaseAdmin implements Abortable, Closeable { } /** + * Get the status of alter command - indicates how many regions have received + * the updated schema Asynchronous operation. + * + * @param tableName + * name of the table to get the status of + * @return Pair indicating the number of regions updated Pair.getFirst() is the + * regions that are yet to be updated Pair.getSecond() is the total number + * of regions of the table + * @throws IOException + * if a remote or network exception occurs + */ + public Pair getAlterStatus(final byte[] tableName) + throws IOException { + return getAlterStatus(TableName.valueOf(tableName)); + } + + /** + * Add a column to an existing table. + * Asynchronous operation. + * + * @param tableName name of the table to add column to + * @param column column descriptor of column to be added + * @throws IOException if a remote or network exception occurs + */ + public void addColumn(final byte[] tableName, HColumnDescriptor column) + throws IOException { + addColumn(TableName.valueOf(tableName), column); + } + + + /** * Add a column to an existing table. * Asynchronous operation. 
* @@ -1058,7 +1102,7 @@ public class HBaseAdmin implements Abortable, Closeable { */ public void addColumn(final String tableName, HColumnDescriptor column) throws IOException { - addColumn(Bytes.toBytes(tableName), column); + addColumn(TableName.valueOf(tableName), column); } /** @@ -1069,7 +1113,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @param column column descriptor of column to be added * @throws IOException if a remote or network exception occurs */ - public void addColumn(final byte [] tableName, final HColumnDescriptor column) + public void addColumn(final TableName tableName, final HColumnDescriptor column) throws IOException { executeCallable(new MasterAdminCallable(getConnection()) { @Override @@ -1089,9 +1133,22 @@ public class HBaseAdmin implements Abortable, Closeable { * @param columnName name of column to be deleted * @throws IOException if a remote or network exception occurs */ + public void deleteColumn(final byte[] tableName, final String columnName) + throws IOException { + deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName)); + } + + /** + * Delete a column from a table. + * Asynchronous operation. + * + * @param tableName name of table + * @param columnName name of column to be deleted + * @throws IOException if a remote or network exception occurs + */ public void deleteColumn(final String tableName, final String columnName) throws IOException { - deleteColumn(Bytes.toBytes(tableName), Bytes.toBytes(columnName)); + deleteColumn(TableName.valueOf(tableName), Bytes.toBytes(columnName)); } /** @@ -1102,7 +1159,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @param columnName name of column to be deleted * @throws IOException if a remote or network exception occurs */ - public void deleteColumn(final byte [] tableName, final byte [] columnName) + public void deleteColumn(final TableName tableName, final byte [] columnName) throws IOException { executeCallable(new MasterAdminCallable(getConnection()) { @Override @@ -1124,7 +1181,20 @@ public class HBaseAdmin implements Abortable, Closeable { */ public void modifyColumn(final String tableName, HColumnDescriptor descriptor) throws IOException { - modifyColumn(Bytes.toBytes(tableName), descriptor); + modifyColumn(TableName.valueOf(tableName), descriptor); + } + + /** + * Modify an existing column family on a table. + * Asynchronous operation. 
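To show how the column-family calls line up after the change, a hedged sketch (table and family names are invented; depending on cluster configuration the table may need to be disabled before these asynchronous schema changes are applied):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class AlterColumnSketch {
  static void alterFamilies(HBaseAdmin admin) throws Exception {
    TableName name = TableName.valueOf("demo");
    admin.addColumn(name, new HColumnDescriptor("cf2"));    // add a new family
    HColumnDescriptor cf = new HColumnDescriptor("cf");
    cf.setMaxVersions(5);
    admin.modifyColumn(name, cf);                           // change an existing family
    admin.deleteColumn(name, Bytes.toBytes("old_cf"));      // drop a family
  }
}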
+ * + * @param tableName name of table + * @param descriptor new column descriptor to use + * @throws IOException if a remote or network exception occurs + */ + public void modifyColumn(final byte[] tableName, HColumnDescriptor descriptor) + throws IOException { + modifyColumn(TableName.valueOf(tableName), descriptor); } @@ -1137,7 +1207,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @param descriptor new column descriptor to use * @throws IOException if a remote or network exception occurs */ - public void modifyColumn(final byte [] tableName, final HColumnDescriptor descriptor) + public void modifyColumn(final TableName tableName, final HColumnDescriptor descriptor) throws IOException { executeCallable(new MasterAdminCallable(getConnection()) { @Override @@ -1286,7 +1356,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ - public void flush(final byte [] tableNameOrRegionName) + public void flush(final byte[] tableNameOrRegionName) throws IOException, InterruptedException { CatalogTracker ct = getCatalogTracker(); try { @@ -1299,7 +1369,8 @@ public class HBaseAdmin implements Abortable, Closeable { flush(regionServerPair.getSecond(), regionServerPair.getFirst()); } } else { - final String tableName = tableNameString(tableNameOrRegionName, ct); + final TableName tableName = checkTableExists( + TableName.valueOf(tableNameOrRegionName), ct); List> pairs = MetaReader.getTableRegionsAndLocations(ct, tableName); @@ -1354,7 +1425,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ - public void compact(final byte [] tableNameOrRegionName) + public void compact(final byte[] tableNameOrRegionName) throws IOException, InterruptedException { compact(tableNameOrRegionName, null, false); } @@ -1382,7 +1453,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ - public void compact(final byte [] tableNameOrRegionName, final byte[] columnFamily) + public void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily) throws IOException, InterruptedException { compact(tableNameOrRegionName, columnFamily, false); } @@ -1408,7 +1479,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ - public void majorCompact(final byte [] tableNameOrRegionName) + public void majorCompact(final byte[] tableNameOrRegionName) throws IOException, InterruptedException { compact(tableNameOrRegionName, null, true); } @@ -1437,7 +1508,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ - public void majorCompact(final byte [] tableNameOrRegionName, + public void majorCompact(final byte[] tableNameOrRegionName, final byte[] columnFamily) throws IOException, InterruptedException { compact(tableNameOrRegionName, columnFamily, true); } @@ -1452,7 +1523,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ - private void compact(final byte [] tableNameOrRegionName, + private void compact(final byte[] tableNameOrRegionName, final byte[] columnFamily,final boolean major) throws 
IOException, InterruptedException { CatalogTracker ct = getCatalogTracker(); @@ -1466,7 +1537,8 @@ public class HBaseAdmin implements Abortable, Closeable { compact(regionServerPair.getSecond(), regionServerPair.getFirst(), major, columnFamily); } } else { - final String tableName = tableNameString(tableNameOrRegionName, ct); + final TableName tableName = + checkTableExists(TableName.valueOf(tableNameOrRegionName), ct); List> pairs = MetaReader.getTableRegionsAndLocations(ct, tableName); @@ -1749,7 +1821,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ - public void split(final byte [] tableNameOrRegionName) + public void split(final byte[] tableNameOrRegionName) throws IOException, InterruptedException { split(tableNameOrRegionName, null); } @@ -1768,7 +1840,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws IOException if a remote or network exception occurs * @throws InterruptedException interrupt exception occurred */ - public void split(final byte [] tableNameOrRegionName, + public void split(final byte[] tableNameOrRegionName, final byte [] splitPoint) throws IOException, InterruptedException { CatalogTracker ct = getCatalogTracker(); try { @@ -1781,7 +1853,8 @@ public class HBaseAdmin implements Abortable, Closeable { split(regionServerPair.getSecond(), regionServerPair.getFirst(), splitPoint); } } else { - final String tableName = tableNameString(tableNameOrRegionName, ct); + final TableName tableName = + checkTableExists(TableName.valueOf(tableNameOrRegionName), ct); List> pairs = MetaReader.getTableRegionsAndLocations(ct, tableName); @@ -1817,11 +1890,11 @@ public class HBaseAdmin implements Abortable, Closeable { * @param htd modified description of the table * @throws IOException if a remote or network exception occurs */ - public void modifyTable(final byte [] tableName, final HTableDescriptor htd) + public void modifyTable(final TableName tableName, final HTableDescriptor htd) throws IOException { - if (!Bytes.equals(tableName, htd.getName())) { - throw new IllegalArgumentException("the specified table name '" + Bytes.toString(tableName) + - "' doesn't match with the HTD one: " + htd.getNameAsString()); + if (!tableName.equals(htd.getTableName())) { + throw new IllegalArgumentException("the specified table name '" + tableName + + "' doesn't match with the HTD one: " + htd.getTableName()); } executeCallable(new MasterAdminCallable(getConnection()) { @@ -1834,6 +1907,16 @@ public class HBaseAdmin implements Abortable, Closeable { }); } + public void modifyTable(final byte[] tableName, final HTableDescriptor htd) + throws IOException { + modifyTable(TableName.valueOf(tableName), htd); + } + + public void modifyTable(final String tableName, final HTableDescriptor htd) + throws IOException { + modifyTable(TableName.valueOf(tableName), htd); + } + /** * @param tableNameOrRegionName Name of a table or name of a region. * @param ct A {@link CatalogTracker} instance (caller of this method usually has one). @@ -1875,21 +1958,21 @@ public class HBaseAdmin implements Abortable, Closeable { } /** - * Convert the table name byte array into a table name string and check if table - * exists or not. - * @param tableNameBytes Name of a table. + * Check if table exists or not + * @param tableName Name of a table. * @param ct A {@link CatalogTracker} instance (caller of this method usually has one). - * @return tableName in string form. 
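The maintenance calls keep their byte[] signature because the argument may name either a table or a single region; a short sketch under that assumption (names are hypothetical):

import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionSketch {
  static void maintenance(HBaseAdmin admin) throws Exception {
    byte[] table = Bytes.toBytes("demo");
    admin.flush(table);         // flush every region of the table
    admin.compact(table);       // queue a minor compaction for the whole table
    admin.majorCompact(table);  // or a major compaction
    admin.split(table);         // request splits across the table's regions
    // Passing an individual region name instead targets only that region.
  }
}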
+ * @return tableName instance * @throws IOException if a remote or network exception occurs. * @throws TableNotFoundException if table does not exist. */ - private String tableNameString(final byte[] tableNameBytes, CatalogTracker ct) + //TODO rename this method + private TableName checkTableExists( + final TableName tableName, CatalogTracker ct) throws IOException { - String tableNameString = Bytes.toString(tableNameBytes); - if (!MetaReader.tableExists(ct, tableNameString)) { - throw new TableNotFoundException(tableNameString); + if (!MetaReader.tableExists(ct, tableName)) { + throw new TableNotFoundException(tableName); } - return tableNameString; + return tableName; } /** @@ -1958,9 +2041,9 @@ public class HBaseAdmin implements Abortable, Closeable { }); } - private HRegionLocation getFirstMetaServerForTable(final byte [] tableName) + private HRegionLocation getFirstMetaServerForTable(final TableName tableName) throws IOException { - return connection.locateRegion(HConstants.META_TABLE_NAME, + return connection.locateRegion(TableName.META_TABLE_NAME, HRegionInfo.createRegionName(tableName, null, HConstants.NINES, false)); } @@ -1972,6 +2055,127 @@ public class HBaseAdmin implements Abortable, Closeable { } /** + * Create a new namespace + * @param descriptor descriptor which describes the new namespace + * @throws IOException + */ + public void createNamespace(final NamespaceDescriptor descriptor) throws IOException { + executeCallable(new MasterAdminCallable(getConnection()) { + @Override + public Void call() throws Exception { + masterAdmin.createNamespace(null, + MasterAdminProtos.CreateNamespaceRequest.newBuilder() + .setNamespaceDescriptor(ProtobufUtil + .toProtoNamespaceDescriptor(descriptor)).build()); + return null; + } + }); + } + + /** + * Modify an existing namespace + * @param descriptor descriptor which describes the new namespace + * @throws IOException + */ + public void modifyNamespace(final NamespaceDescriptor descriptor) throws IOException { + executeCallable(new MasterAdminCallable(getConnection()) { + @Override + public Void call() throws Exception { + masterAdmin.modifyNamespace(null, + MasterAdminProtos.ModifyNamespaceRequest.newBuilder() + .setNamespaceDescriptor(ProtobufUtil + .toProtoNamespaceDescriptor(descriptor)).build()); + return null; + } + }); + } + + /** + * Delete an existing namespace. Only empty namespaces (no tables) can be removed. 
+ * @param name namespace name + * @throws IOException + */ + public void deleteNamespace(final String name) throws IOException { + executeCallable(new MasterAdminCallable(getConnection()) { + @Override + public Void call() throws Exception { + masterAdmin.deleteNamespace(null, + MasterAdminProtos.DeleteNamespaceRequest.newBuilder() + .setNamespaceName(name).build()); + return null; + } + }); + } + + /** + * Get a namespace descriptor by name + * @param name name of namespace descriptor + * @return + * @throws IOException + */ + public NamespaceDescriptor getNamespaceDescriptor(final String name) throws IOException { + return + executeCallable(new MasterAdminCallable(getConnection()) { + @Override + public NamespaceDescriptor call() throws Exception { + return ProtobufUtil.toNamespaceDescriptor( + masterAdmin.getNamespaceDescriptor(null, + MasterAdminProtos.GetNamespaceDescriptorRequest.newBuilder() + .setNamespaceName(name).build()).getNamespaceDescriptor()); + } + }); + } + + /** + * List available namespace descriptors + * @return + * @throws IOException + */ + public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException { + return + executeCallable(new MasterAdminCallable(getConnection()) { + @Override + public NamespaceDescriptor[] call() throws Exception { + List list = + masterAdmin.listNamespaceDescriptors(null, + MasterAdminProtos.ListNamespaceDescriptorsRequest.newBuilder().build()) + .getNamespaceDescriptorList(); + NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()]; + for(int i = 0; i < list.size(); i++) { + res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i)); + } + return res; + } + }); + } + + /** + * Get list of table descriptors by namespace + * @param name namespace name + * @return + * @throws IOException + */ + public HTableDescriptor[] getTableDescriptorsByNamespace(final String name) throws IOException { + return + executeCallable(new MasterAdminCallable(getConnection()) { + @Override + public HTableDescriptor[] call() throws Exception { + List list = + masterAdmin.getTableDescriptorsByNamespace(null, + MasterAdminProtos.GetTableDescriptorsByNamespaceRequest.newBuilder() + .setNamespaceName(name).build()) + .getTableSchemaList(); + HTableDescriptor[] res = new HTableDescriptor[list.size()]; + for(int i=0; i < list.size(); i++) { + + res[i] = HTableDescriptor.convert(list.get(i)); + } + return res; + } + }); + } + + /** * Check to see if HBase is running. Throw an exception if not. * We consider that HBase is running if ZooKeeper and Master are running. * @@ -2029,7 +2233,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @return Ordered list of {@link HRegionInfo}. 
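Since the namespace operations are new in this patch, a usage sketch may help; it assumes NamespaceDescriptor exposes the create(...).build() builder of the eventual API, and "analytics" is a made-up namespace name:

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class NamespaceSketch {
  static void manageNamespace(HBaseAdmin admin) throws Exception {
    admin.createNamespace(NamespaceDescriptor.create("analytics").build());
    for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
      System.out.println("namespace: " + ns.getName());
    }
    HTableDescriptor[] tables = admin.getTableDescriptorsByNamespace("analytics");
    System.out.println("tables in analytics: " + tables.length);
    admin.deleteNamespace("analytics");   // only succeeds while the namespace holds no tables
  }
}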
* @throws IOException */ - public List getTableRegions(final byte[] tableName) + public List getTableRegions(final TableName tableName) throws IOException { CatalogTracker ct = getCatalogTracker(); List Regions = null; @@ -2041,6 +2245,11 @@ public class HBaseAdmin implements Abortable, Closeable { return Regions; } + public List getTableRegions(final byte[] tableName) + throws IOException { + return getTableRegions(TableName.valueOf(tableName)); + } + @Override public void close() throws IOException { if (cleanupConnectionOnClose && this.connection != null) { @@ -2048,18 +2257,34 @@ public class HBaseAdmin implements Abortable, Closeable { } } - /** - * Get tableDescriptors - * @param tableNames List of table names - * @return HTD[] the tableDescriptor - * @throws IOException if a remote or network exception occurs - */ - public HTableDescriptor[] getTableDescriptors(List tableNames) + /** + * Get tableDescriptors + * @param tableNames List of table names + * @return HTD[] the tableDescriptor + * @throws IOException if a remote or network exception occurs + */ + public HTableDescriptor[] getTableDescriptorsByTableName(List tableNames) throws IOException { - return this.connection.getHTableDescriptors(tableNames); + return this.connection.getHTableDescriptorsByTableName(tableNames); } /** + * Get tableDescriptors + * @param tableNames List of table names + * @return HTD[] the tableDescriptor + * @throws IOException if a remote or network exception occurs + */ + public HTableDescriptor[] getTableDescriptors(List names) + throws IOException { + List tableNames = new ArrayList(names.size()); + for(String name : names) { + tableNames.add(TableName.valueOf(name)); + } + return getTableDescriptorsByTableName(tableNames); + } + + + /** * Roll the log writer. That is, start writing log messages to a new file. * * @param serverName @@ -2123,7 +2348,7 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws InterruptedException * @return the current compaction state */ - public CompactionState getCompactionState(final byte [] tableNameOrRegionName) + public CompactionState getCompactionState(final byte[] tableNameOrRegionName) throws IOException, InterruptedException { CompactionState state = CompactionState.NONE; CatalogTracker ct = getCatalogTracker(); @@ -2142,7 +2367,8 @@ public class HBaseAdmin implements Abortable, Closeable { return response.getCompactionState(); } } else { - final String tableName = tableNameString(tableNameOrRegionName, ct); + final TableName tableName = + checkTableExists(TableName.valueOf(tableNameOrRegionName), ct); List> pairs = MetaReader.getTableRegionsAndLocations(ct, tableName); for (Pair pair: pairs) { @@ -2198,36 +2424,54 @@ public class HBaseAdmin implements Abortable, Closeable { * a {@link SnapshotCreationException} indicating the duplicate naming. *
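A brief sketch of the renamed descriptor lookup (table names are hypothetical; the String-based getTableDescriptors wrapper shown above behaves the same way after converting each name):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class DescriptorLookupSketch {
  static void fetchDescriptors(HBaseAdmin admin) throws Exception {
    List<TableName> names = Arrays.asList(
        TableName.valueOf("demo"), TableName.valueOf("ns:other"));
    for (HTableDescriptor htd : admin.getTableDescriptorsByTableName(names)) {
      System.out.println(htd.getTableName() + " has "
          + htd.getColumnFamilies().length + " column families");
    }
  }
}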

* Snapshot names follow the same naming constraints as tables in HBase. See - * {@link HTableDescriptor#isLegalTableName(byte[])}. + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. * @param snapshotName name of the snapshot to be created * @param tableName name of the table for which snapshot is created * @throws IOException if a remote or network exception occurs * @throws SnapshotCreationException if snapshot creation failed * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ - public void snapshot(final String snapshotName, final String tableName) throws IOException, + public void snapshot(final String snapshotName, + final TableName tableName) throws IOException, SnapshotCreationException, IllegalArgumentException { snapshot(snapshotName, tableName, SnapshotDescription.Type.FLUSH); } - /** - * Create a timestamp consistent snapshot for the given table. - *

- * Snapshots are considered unique based on the name of the snapshot. Attempts to take a - * snapshot with the same name (even a different type or with different parameters) will fail with - * a {@link SnapshotCreationException} indicating the duplicate naming. - *

- * Snapshot names follow the same naming constraints as tables in HBase. See - * {@link HTableDescriptor#isLegalTableName(byte[])}. - * @param snapshotName name of the snapshot to be created - * @param tableName name of the table for which snapshot is created - * @throws IOException if a remote or network exception occurs - * @throws SnapshotCreationException if snapshot creation failed - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly - */ - public void snapshot(final byte[] snapshotName, final byte[] tableName) throws IOException, + public void snapshot(final String snapshotName, + final String tableName) throws IOException, + SnapshotCreationException, IllegalArgumentException { + snapshot(snapshotName, TableName.valueOf(tableName), + SnapshotDescription.Type.FLUSH); + } + + public void snapshot(final String snapshotName, + final byte[] tableName) throws IOException, + SnapshotCreationException, IllegalArgumentException { + snapshot(snapshotName, TableName.valueOf(tableName), + SnapshotDescription.Type.FLUSH); + } + + /** + * Create a timestamp consistent snapshot for the given table. + *

+ * Snapshots are considered unique based on the name of the snapshot. Attempts to take a + * snapshot with the same name (even a different type or with different parameters) will fail with + * a {@link SnapshotCreationException} indicating the duplicate naming. + *

+ * Snapshot names follow the same naming constraints as tables in HBase. See + * {@link HTableDescriptor#isLegalTableName(byte[])}. + * @param snapshotName name of the snapshot to be created + * @param tableName name of the table for which snapshot is created + * @throws IOException if a remote or network exception occurs + * @throws SnapshotCreationException if snapshot creation failed + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + */ + public void snapshot(final byte[] snapshotName, + final TableName tableName) throws IOException, SnapshotCreationException, IllegalArgumentException { - snapshot(Bytes.toString(snapshotName), Bytes.toString(tableName)); + snapshot(Bytes.toString(snapshotName), tableName, SnapshotDescription.Type.FLUSH); + } + + public void snapshot(final byte[] snapshotName, + final byte[] tableName) throws IOException, + SnapshotCreationException, IllegalArgumentException { + snapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName), + SnapshotDescription.Type.FLUSH); } /** @@ -2238,7 +2482,7 @@ public class HBaseAdmin implements Abortable, Closeable { * a {@link SnapshotCreationException} indicating the duplicate naming. *

* Snapshot names follow the same naming constraints as tables in HBase. See - * {@link HTableDescriptor#isLegalTableName(byte[])}. + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. *

* @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other * snapshots stored on the cluster @@ -2248,16 +2492,31 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws SnapshotCreationException if snapshot creation failed * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ - public void snapshot(final String snapshotName, final String tableName, - SnapshotDescription.Type type) throws IOException, SnapshotCreationException, + public void snapshot(final String snapshotName, + final TableName tableName, + SnapshotDescription.Type type) throws IOException, SnapshotCreationException, IllegalArgumentException { SnapshotDescription.Builder builder = SnapshotDescription.newBuilder(); - builder.setTable(tableName); + builder.setTable(tableName.getNameAsString()); builder.setName(snapshotName); builder.setType(type); snapshot(builder.build()); } + public void snapshot(final String snapshotName, + final String tableName, + SnapshotDescription.Type type) throws IOException, SnapshotCreationException, + IllegalArgumentException { + snapshot(snapshotName, TableName.valueOf(tableName), type); + } + + public void snapshot(final String snapshotName, + final byte[] tableName, + SnapshotDescription.Type type) throws IOException, SnapshotCreationException, + IllegalArgumentException { + snapshot(snapshotName, TableName.valueOf(tableName), type); + } + /** * Take a snapshot and wait for the server to complete that snapshot (blocking). *
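For the snapshot overloads above, a minimal sketch (snapshot and table names are invented; SnapshotDescription is assumed to be the protobuf-generated type from HBaseProtos referenced elsewhere in this patch):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

public class SnapshotSketch {
  static void takeSnapshots(HBaseAdmin admin) throws Exception {
    TableName name = TableName.valueOf("demo");
    admin.snapshot("demo-snap-1", name);                                   // FLUSH type by default
    admin.snapshot("demo-snap-2", name, SnapshotDescription.Type.FLUSH);   // type given explicitly
  }
}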

@@ -2270,7 +2529,7 @@ public class HBaseAdmin implements Abortable, Closeable { * a {@link SnapshotCreationException} indicating the duplicate naming. *

* Snapshot names follow the same naming constraints as tables in HBase. See - * {@link HTableDescriptor#isLegalTableName(byte[])}. + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. *

* You should probably use {@link #snapshot(String, String)} or {@link #snapshot(byte[], byte[])} * unless you are sure about the type of snapshot that you want to take. @@ -2406,10 +2665,10 @@ public class HBaseAdmin implements Abortable, Closeable { throws IOException, RestoreSnapshotException { String rollbackSnapshot = snapshotName + "-" + EnvironmentEdgeManager.currentTimeMillis(); - String tableName = null; + TableName tableName = null; for (SnapshotDescription snapshotInfo: listSnapshots()) { if (snapshotInfo.getName().equals(snapshotName)) { - tableName = snapshotInfo.getTable(); + tableName = TableName.valueOf(snapshotInfo.getTable()); break; } } @@ -2453,9 +2712,26 @@ public class HBaseAdmin implements Abortable, Closeable { */ public void cloneSnapshot(final byte[] snapshotName, final byte[] tableName) throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException { - cloneSnapshot(Bytes.toString(snapshotName), Bytes.toString(tableName)); + cloneSnapshot(Bytes.toString(snapshotName), TableName.valueOf(tableName)); + } + + /** + * Create a new table by cloning the snapshot content. + * + * @param snapshotName name of the snapshot to be cloned + * @param tableName name of the table where the snapshot will be restored + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists + * @throws RestoreSnapshotException if snapshot failed to be cloned + * @throws IllegalArgumentException if the specified table has not a valid name + */ + public void cloneSnapshot(final byte[] snapshotName, final TableName tableName) + throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException { + cloneSnapshot(Bytes.toString(snapshotName), tableName); } + + /** * Create a new table by cloning the snapshot content. * @@ -2468,11 +2744,26 @@ public class HBaseAdmin implements Abortable, Closeable { */ public void cloneSnapshot(final String snapshotName, final String tableName) throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException { + cloneSnapshot(snapshotName, TableName.valueOf(tableName)); + } + + /** + * Create a new table by cloning the snapshot content. 
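A short sketch of the clone path (names are hypothetical; the target table must not exist yet):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CloneSnapshotSketch {
  static void restoreIntoNewTable(HBaseAdmin admin) throws Exception {
    admin.cloneSnapshot("demo-snap-1", TableName.valueOf("demo_restored"));
    // Per the implementation above, the call returns only after the new table is created and enabled.
  }
}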
+ * + * @param snapshotName name of the snapshot to be cloned + * @param tableName name of the table where the snapshot will be restored + * @throws IOException if a remote or network exception occurs + * @throws TableExistsException if table to be created already exists + * @throws RestoreSnapshotException if snapshot failed to be cloned + * @throws IllegalArgumentException if the specified table has not a valid name + */ + public void cloneSnapshot(final String snapshotName, final TableName tableName) + throws IOException, TableExistsException, RestoreSnapshotException, InterruptedException { if (tableExists(tableName)) { - throw new TableExistsException(tableName); + throw new TableExistsException("Table '" + tableName + " already exists"); } internalRestoreSnapshot(snapshotName, tableName); - waitUntilTableIsEnabled(Bytes.toBytes(tableName)); + waitUntilTableIsEnabled(tableName); } /** @@ -2485,10 +2776,11 @@ public class HBaseAdmin implements Abortable, Closeable { * @throws RestoreSnapshotException if snapshot failed to be restored * @throws IllegalArgumentException if the restore request is formatted incorrectly */ - private void internalRestoreSnapshot(final String snapshotName, final String tableName) + private void internalRestoreSnapshot(final String snapshotName, final TableName + tableName) throws IOException, RestoreSnapshotException { SnapshotDescription snapshot = SnapshotDescription.newBuilder() - .setName(snapshotName).setTable(tableName).build(); + .setName(snapshotName).setTable(tableName.getNameAsString()).build(); // actually restore the snapshot internalRestoreSnapshotAsync(snapshot); @@ -2609,7 +2901,7 @@ public class HBaseAdmin implements Abortable, Closeable { */ public void deleteSnapshot(final String snapshotName) throws IOException { // make sure the snapshot is possibly valid - HTableDescriptor.isLegalTableName(Bytes.toBytes(snapshotName)); + TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName)); // do the delete executeCallable(new MasterAdminCallable(getConnection()) { @Override @@ -2652,7 +2944,7 @@ public class HBaseAdmin implements Abortable, Closeable { } /** - * @see {@link #execute(MasterAdminCallable)} + * @see {@link #executeCallable(org.apache.hadoop.hbase.client.HBaseAdmin.MasterCallable)} */ abstract static class MasterAdminCallable extends MasterCallable { protected MasterAdminKeepAliveConnection masterAdmin; @@ -2673,7 +2965,7 @@ public class HBaseAdmin implements Abortable, Closeable { } /** - * @see {@link #execute(MasterMonitorCallable)} + * @see {@link #executeCallable(org.apache.hadoop.hbase.client.HBaseAdmin.MasterCallable)} */ abstract static class MasterMonitorCallable extends MasterCallable { protected MasterMonitorKeepAliveConnection masterMonitor; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java index c6b3e3b..3806474 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; @@ 
-82,6 +83,9 @@ public interface HConnection extends Abortable, Closeable { * @return true if the table is enabled, false otherwise * @throws IOException if a remote or network exception occurs */ + boolean isTableEnabled(TableName tableName) throws IOException; + + @Deprecated boolean isTableEnabled(byte[] tableName) throws IOException; /** @@ -89,6 +93,9 @@ public interface HConnection extends Abortable, Closeable { * @return true if the table is disabled, false otherwise * @throws IOException if a remote or network exception occurs */ + boolean isTableDisabled(TableName tableName) throws IOException; + + @Deprecated boolean isTableDisabled(byte[] tableName) throws IOException; /** @@ -96,6 +103,9 @@ public interface HConnection extends Abortable, Closeable { * @return true if all regions of the table are available, false otherwise * @throws IOException if a remote or network exception occurs */ + boolean isTableAvailable(TableName tableName) throws IOException; + + @Deprecated boolean isTableAvailable(byte[] tableName) throws IOException; /** @@ -110,7 +120,12 @@ public interface HConnection extends Abortable, Closeable { * @throws IOException * if a remote or network exception occurs */ - boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws IOException; + boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws + IOException; + + @Deprecated + boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws + IOException; /** * List all the userspace tables. In other words, scan the META table. @@ -129,6 +144,10 @@ public interface HConnection extends Abortable, Closeable { * @return table metadata * @throws IOException if a remote or network exception occurs */ + HTableDescriptor getHTableDescriptor(TableName tableName) + throws IOException; + + @Deprecated HTableDescriptor getHTableDescriptor(byte[] tableName) throws IOException; @@ -141,10 +160,12 @@ public interface HConnection extends Abortable, Closeable { * question * @throws IOException if a remote or network exception occurs */ - HRegionLocation locateRegion( - final byte[] tableName, final byte[] row - ) - throws IOException; + public HRegionLocation locateRegion(final TableName tableName, + final byte [] row) throws IOException; + + @Deprecated + public HRegionLocation locateRegion(final byte[] tableName, + final byte [] row) throws IOException; /** * Allows flushing the region cache. @@ -157,6 +178,9 @@ public interface HConnection extends Abortable, Closeable { * @param tableName Name of the table whose regions we are to remove from * cache. */ + void clearRegionCache(final TableName tableName); + + @Deprecated void clearRegionCache(final byte[] tableName); /** @@ -174,10 +198,12 @@ public interface HConnection extends Abortable, Closeable { * question * @throws IOException if a remote or network exception occurs */ - HRegionLocation relocateRegion( - final byte[] tableName, final byte[] row - ) - throws IOException; + HRegionLocation relocateRegion(final TableName tableName, + final byte [] row) throws IOException; + + @Deprecated + HRegionLocation relocateRegion(final byte[] tableName, + final byte [] row) throws IOException; /** * Update the location cache. This is used internally by HBase, in most cases it should not be @@ -187,9 +213,12 @@ public interface HConnection extends Abortable, Closeable { * @param exception the exception if any. Can be null. 
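To illustrate the TableName-based HConnection methods introduced above, a hedged sketch (it assumes HConnectionManager.createConnection(Configuration) is available on this branch; the table name and row key are invented):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HConnection connection = HConnectionManager.createConnection(conf);
    try {
      TableName name = TableName.valueOf("demo");
      HRegionLocation loc = connection.locateRegion(name, Bytes.toBytes("row-0001"));
      System.out.println("row-0001 is served by " + loc.getHostname() + ":" + loc.getPort());
      connection.clearRegionCache(name);   // drop any cached locations for this table
    } finally {
      connection.close();
    }
  }
}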
* @param source the previous location */ - void updateCachedLocations( - byte[] tableName, byte[] rowkey, Object exception, HRegionLocation source - ); + void updateCachedLocations(TableName tableName, byte[] rowkey, + Object exception, HRegionLocation source); + + @Deprecated + void updateCachedLocations(byte[] tableName, byte[] rowkey, + Object exception, HRegionLocation source); /** * Gets the location of the region of regionName. @@ -207,8 +236,10 @@ public interface HConnection extends Abortable, Closeable { * @return list of region locations for all regions of table * @throws IOException */ - List locateRegions(final byte[] tableName) - throws IOException; + List locateRegions(final TableName tableName) throws IOException; + + @Deprecated + List locateRegions(final byte[] tableName) throws IOException; /** * Gets the locations of all regions in the specified table, tableName. @@ -219,9 +250,14 @@ public interface HConnection extends Abortable, Closeable { * @return list of region locations for all regions of table * @throws IOException */ - List locateRegions( - final byte[] tableName, final boolean useCache, final boolean offlined - ) throws IOException; + public List locateRegions(final TableName tableName, + final boolean useCache, + final boolean offlined) throws IOException; + + @Deprecated + public List locateRegions(final byte[] tableName, + final boolean useCache, + final boolean offlined) throws IOException; /** * Returns a {@link MasterAdminKeepAliveConnection} to the active master @@ -271,7 +307,12 @@ public interface HConnection extends Abortable, Closeable { * @return Location of row. * @throws IOException if a remote or network exception occurs */ - HRegionLocation getRegionLocation(byte [] tableName, byte [] row, + HRegionLocation getRegionLocation(TableName tableName, byte [] row, + boolean reload) + throws IOException; + + @Deprecated + HRegionLocation getRegionLocation(byte[] tableName, byte [] row, boolean reload) throws IOException; @@ -291,10 +332,12 @@ public interface HConnection extends Abortable, Closeable { * @deprecated since 0.96 - Use {@link HTableInterface#batch} instead */ @Deprecated - void processBatch( - List actions, final byte[] tableName, ExecutorService pool, Object[] results - ) - throws IOException, InterruptedException; + void processBatch(List actions, final TableName tableName, + ExecutorService pool, Object[] results) throws IOException, InterruptedException; + + @Deprecated + void processBatch(List actions, final byte[] tableName, + ExecutorService pool, Object[] results) throws IOException, InterruptedException; /** * Parameterized batch processing, allowing varying return types for different @@ -302,13 +345,18 @@ public interface HConnection extends Abortable, Closeable { * @deprecated since 0.96 - Use {@link HTableInterface#batchCallback} instead */ @Deprecated - void processBatchCallback( - List list, - byte[] tableName, - ExecutorService pool, - Object[] results, - Batch.Callback callback - ) throws IOException, InterruptedException; + public void processBatchCallback(List list, + final TableName tableName, + ExecutorService pool, + Object[] results, + Batch.Callback callback) throws IOException, InterruptedException; + + @Deprecated + public void processBatchCallback(List list, + final byte[] tableName, + ExecutorService pool, + Object[] results, + Batch.Callback callback) throws IOException, InterruptedException; /** * Enable or disable region cache prefetch for the table. 
It will be @@ -317,9 +365,11 @@ public interface HConnection extends Abortable, Closeable { * @param tableName name of table to configure. * @param enable Set to true to enable region cache prefetch. */ - void setRegionCachePrefetch( - final byte[] tableName, final boolean enable - ); + public void setRegionCachePrefetch(final TableName tableName, + final boolean enable); + + public void setRegionCachePrefetch(final byte[] tableName, + final boolean enable); /** * Check whether region cache prefetch is enabled or not. @@ -327,6 +377,8 @@ public interface HConnection extends Abortable, Closeable { * @return true if table's region cache prefetch is enabled. Otherwise * it is disabled. */ + boolean getRegionCachePrefetch(final TableName tableName); + boolean getRegionCachePrefetch(final byte[] tableName); /** @@ -341,8 +393,11 @@ public interface HConnection extends Abortable, Closeable { * @return HTD[] table metadata * @throws IOException if a remote or network exception occurs */ - HTableDescriptor[] getHTableDescriptors(List tableNames) - throws IOException; + HTableDescriptor[] getHTableDescriptorsByTableName(List tableNames) throws IOException; + + @Deprecated + HTableDescriptor[] getHTableDescriptors(List tableNames) throws + IOException; /** * @return true if this connection is closed diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index 87af457..0fbd679 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -45,6 +45,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Chore; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -69,6 +70,22 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos + .ListNamespaceDescriptorsResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos + .ListNamespaceDescriptorsRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos + .GetTableDescriptorsByNamespaceResponse; +import 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos + .GetTableDescriptorsByNamespaceRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest; @@ -375,7 +392,7 @@ public class HConnectionManager { * @return Number of cached regions for the table. * @throws ZooKeeperConnectionException */ - static int getCachedRegionCount(Configuration conf, final byte[] tableName) + static int getCachedRegionCount(Configuration conf, final TableName tableName) throws IOException { return execute(new HConnectable(conf) { @Override @@ -391,7 +408,9 @@ public class HConnectionManager { * @return true if the region where the table and row reside is cached. * @throws ZooKeeperConnectionException */ - static boolean isRegionCached(Configuration conf, final byte[] tableName, final byte[] row) + static boolean isRegionCached(Configuration conf, + final TableName tableName, + final byte[] row) throws IOException { return execute(new HConnectable(conf) { @Override @@ -471,11 +490,11 @@ public class HConnectionManager { private RpcClient rpcClient; /** - * Map of table to table {@link HRegionLocation}s. The table key is made - * by doing a {@link Bytes#mapKey(byte[])} of the table's name. - */ - private final Map> cachedRegionLocations = - new HashMap>(); + * Map of table to table {@link HRegionLocation}s. + */ + private final Map> + cachedRegionLocations = + new HashMap>(); // The presence of a server in the map implies it's likely that there is an // entry in cachedRegionLocations that map to this server; but the absence @@ -648,24 +667,41 @@ public class HConnectionManager { } @Override - public HRegionLocation getRegionLocation(final byte [] name, + public HRegionLocation getRegionLocation(final TableName tableName, final byte [] row, boolean reload) throws IOException { - return reload? relocateRegion(name, row): locateRegion(name, row); + return reload? 
relocateRegion(tableName, row): locateRegion(tableName, row); } @Override - public boolean isTableEnabled(byte[] tableName) throws IOException { + public HRegionLocation getRegionLocation(final byte[] tableName, + final byte [] row, boolean reload) + throws IOException { + return getRegionLocation(TableName.valueOf(tableName), row, reload); + } + + @Override + public boolean isTableEnabled(TableName tableName) throws IOException { return this.registry.isTableOnlineState(tableName, true); } @Override - public boolean isTableDisabled(byte[] tableName) throws IOException { + public boolean isTableEnabled(byte[] tableName) throws IOException { + return isTableEnabled(TableName.valueOf(tableName)); + } + + @Override + public boolean isTableDisabled(TableName tableName) throws IOException { return this.registry.isTableOnlineState(tableName, false); } @Override - public boolean isTableAvailable(final byte[] tableName) throws IOException { + public boolean isTableDisabled(byte[] tableName) throws IOException { + return isTableDisabled(TableName.valueOf(tableName)); + } + + @Override + public boolean isTableAvailable(final TableName tableName) throws IOException { final AtomicBoolean available = new AtomicBoolean(true); final AtomicInteger regionCount = new AtomicInteger(0); MetaScannerVisitor visitor = new MetaScannerVisitorBase() { @@ -673,14 +709,15 @@ public class HConnectionManager { public boolean processRow(Result row) throws IOException { HRegionInfo info = MetaScanner.getHRegionInfo(row); if (info != null) { - if (Bytes.compareTo(tableName, info.getTableName()) == 0) { + if (tableName.equals(info.getTableName())) { ServerName server = HRegionInfo.getServerName(row); if (server == null) { available.set(false); return false; } regionCount.incrementAndGet(); - } else if (Bytes.compareTo(tableName, info.getTableName()) < 0) { + } else if (tableName.compareTo( + info.getTableName()) < 0) { // Return if we are done with the current table return false; } @@ -693,7 +730,12 @@ public class HConnectionManager { } @Override - public boolean isTableAvailable(final byte[] tableName, final byte[][] splitKeys) + public boolean isTableAvailable(final byte[] tableName) throws IOException { + return isTableAvailable(TableName.valueOf(tableName)); + } + + @Override + public boolean isTableAvailable(final TableName tableName, final byte[][] splitKeys) throws IOException { final AtomicBoolean available = new AtomicBoolean(true); final AtomicInteger regionCount = new AtomicInteger(0); @@ -702,7 +744,7 @@ public class HConnectionManager { public boolean processRow(Result row) throws IOException { HRegionInfo info = MetaScanner.getHRegionInfo(row); if (info != null) { - if (Bytes.compareTo(tableName, info.getTableName()) == 0) { + if (tableName.equals(info.getTableName())) { ServerName server = HRegionInfo.getServerName(row); if (server == null) { available.set(false); @@ -720,7 +762,7 @@ public class HConnectionManager { // Always empty start row should be counted regionCount.incrementAndGet(); } - } else if (Bytes.compareTo(tableName, info.getTableName()) < 0) { + } else if (tableName.compareTo(info.getTableName()) < 0) { // Return if we are done with the current table return false; } @@ -734,6 +776,12 @@ public class HConnectionManager { } @Override + public boolean isTableAvailable(final byte[] tableName, final byte[][] splitKeys) + throws IOException { + return isTableAvailable(TableName.valueOf(tableName), splitKeys); + } + + @Override public HRegionLocation locateRegion(final byte[] regionName) throws 
IOException { return locateRegion(HRegionInfo.getTableName(regionName), HRegionInfo.getStartKey(regionName), false, true); @@ -749,14 +797,20 @@ public class HConnectionManager { } @Override - public List locateRegions(final byte[] tableName) + public List locateRegions(final TableName tableName) throws IOException { return locateRegions (tableName, false, true); } @Override - public List locateRegions(final byte[] tableName, final boolean useCache, - final boolean offlined) throws IOException { + public List locateRegions(final byte[] tableName) + throws IOException { + return locateRegions(TableName.valueOf(tableName)); + } + + @Override + public List locateRegions(final TableName tableName, + final boolean useCache, final boolean offlined) throws IOException { NavigableMap regions = MetaScanner.allTableRegions(conf, this, tableName, offlined); final List locations = new ArrayList(); @@ -767,41 +821,59 @@ public class HConnectionManager { } @Override - public HRegionLocation locateRegion(final byte [] tableName, + public List locateRegions(final byte[] tableName, + final boolean useCache, final boolean offlined) throws IOException { + return locateRegions(TableName.valueOf(tableName), useCache, offlined); + } + + @Override + public HRegionLocation locateRegion(final TableName tableName, final byte [] row) throws IOException{ return locateRegion(tableName, row, true, true); } @Override - public HRegionLocation relocateRegion(final byte [] tableName, + public HRegionLocation locateRegion(final byte[] tableName, final byte [] row) throws IOException{ + return locateRegion(TableName.valueOf(tableName), row); + } + @Override + public HRegionLocation relocateRegion(final TableName tableName, + final byte [] row) throws IOException{ // Since this is an explicit request not to use any caching, finding // disabled tables should not be desirable. This will ensure that an exception is thrown when // the first time a disabled table is interacted with. if (isTableDisabled(tableName)) { - throw new DoNotRetryIOException(Bytes.toString(tableName) + " is disabled."); + throw new DoNotRetryIOException(tableName.getNameAsString() + " is disabled."); } return locateRegion(tableName, row, false, true); } - private HRegionLocation locateRegion(final byte [] tableName, + @Override + public HRegionLocation relocateRegion(final byte[] tableName, + final byte [] row) throws IOException { + return relocateRegion(TableName.valueOf(tableName), row); + } + + + private HRegionLocation locateRegion(final TableName tableName, final byte [] row, boolean useCache, boolean retry) throws IOException { if (this.closed) throw new IOException(toString() + " closed"); - if (tableName == null || tableName.length == 0) { + if (tableName== null || tableName.getName().length == 0) { throw new IllegalArgumentException( "table name cannot be null or zero length"); } - if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { + if (tableName.equals(TableName.META_TABLE_NAME)) { return this.registry.getMetaRegionLocation(); } else { // Region not in the cache - have to go to the meta RS - return locateRegionInMeta(HConstants.META_TABLE_NAME, tableName, row, + return locateRegionInMeta(TableName.META_TABLE_NAME, tableName, row, useCache, userRegionLock, retry); } } @@ -811,7 +883,7 @@ public class HConnectionManager { * row we're seeking. It will prefetch certain number of regions info and * save them to the global region cache. 
*/ - private void prefetchRegionCache(final byte[] tableName, + private void prefetchRegionCache(final TableName tableName, final byte[] row) { // Implement a new visitor for MetaScanner, and use it to walk through // the .META. @@ -824,7 +896,7 @@ public class HConnectionManager { } // possible we got a region of a different table... - if (!Bytes.equals(regionInfo.getTableName(), tableName)) { + if (!regionInfo.getTableName().equals(tableName)) { return false; // stop scanning } if (regionInfo.isOffline()) { @@ -850,7 +922,7 @@ public class HConnectionManager { try { // pre-fetch certain number of regions info at region cache. MetaScanner.metaScan(conf, this, visitor, tableName, row, - this.prefetchRegionLimit, HConstants.META_TABLE_NAME); + this.prefetchRegionLimit, TableName.META_TABLE_NAME); } catch (IOException e) { LOG.warn("Encountered problems when prefetch META table: ", e); } @@ -860,8 +932,8 @@ public class HConnectionManager { * Search the .META. table for the HRegionLocation * info that contains the table and row we're seeking. */ - private HRegionLocation locateRegionInMeta(final byte [] parentTable, - final byte [] tableName, final byte [] row, boolean useCache, + private HRegionLocation locateRegionInMeta(final TableName parentTable, + final TableName tableName, final byte [] row, boolean useCache, Object regionLockObject, boolean retry) throws IOException { HRegionLocation location; @@ -907,7 +979,7 @@ public class HConnectionManager { } // If the parent table is META, we may want to pre-fetch some // region info into the global region cache for this table. - if (Bytes.equals(parentTable, HConstants.META_TABLE_NAME) + if (parentTable.equals(TableName.META_TABLE_NAME) && (getRegionCachePrefetch(tableName))) { prefetchRegionCache(tableName, row); } @@ -926,21 +998,21 @@ public class HConnectionManager { HConstants.CATALOG_FAMILY); } if (regionInfoRow == null) { - throw new TableNotFoundException(Bytes.toString(tableName)); + throw new TableNotFoundException(tableName); } // convert the row result into the HRegionLocation we need! HRegionInfo regionInfo = MetaScanner.getHRegionInfo(regionInfoRow); if (regionInfo == null) { throw new IOException("HRegionInfo was null or empty in " + - Bytes.toString(parentTable) + ", row=" + regionInfoRow); + parentTable + ", row=" + regionInfoRow); } // possible we got a region of a different table... - if (!Bytes.equals(regionInfo.getTableName(), tableName)) { + if (!regionInfo.getTableName().equals(tableName)) { throw new TableNotFoundException( - "Table '" + Bytes.toString(tableName) + "' was not found, got: " + - Bytes.toString(regionInfo.getTableName()) + "."); + "Table '" + tableName + "' was not found, got: " + + regionInfo.getTableName() + "."); } if (regionInfo.isSplit()) { throw new RegionOfflineException("the only available region for" + @@ -957,7 +1029,7 @@ public class HConnectionManager { ServerName serverName = HRegionInfo.getServerName(regionInfoRow); if (serverName == null) { throw new NoServerForRegionException("No server address listed " + - "in " + Bytes.toString(parentTable) + " for region " + + "in " + parentTable + " for region " + regionInfo.getRegionNameAsString() + " containing row " + Bytes.toStringBinary(row)); } @@ -985,7 +1057,7 @@ public class HConnectionManager { if (tries < numTries - 1) { if (LOG.isDebugEnabled()) { LOG.debug("locateRegionInMeta parentTable=" + - Bytes.toString(parentTable) + ", metaLocation=" + + parentTable + ", metaLocation=" + ((metaLocation == null)? 
"null": "{" + metaLocation + "}") + ", attempt=" + tries + " of " + this.numTries + " failed; retrying after sleep of " + @@ -1021,9 +1093,9 @@ public class HConnectionManager { * @param row * @return Null or region location found in cache. */ - HRegionLocation getCachedLocation(final byte [] tableName, + HRegionLocation getCachedLocation(final TableName tableName, final byte [] row) { - SoftValueSortedMap tableLocations = + SoftValueSortedMap tableLocations = getTableLocations(tableName); // start to examine the cache. we can only do cache actions @@ -1063,7 +1135,7 @@ public class HConnectionManager { * @param tableName tableName * @param row */ - void forceDeleteCachedLocation(final byte [] tableName, final byte [] row) { + void forceDeleteCachedLocation(final TableName tableName, final byte [] row) { HRegionLocation rl = null; synchronized (this.cachedRegionLocations) { Map tableLocations = getTableLocations(tableName); @@ -1079,7 +1151,7 @@ public class HConnectionManager { if ((rl != null) && LOG.isDebugEnabled()) { LOG.debug("Removed " + rl.getHostname() + ":" + rl.getPort() + " as a location of " + rl.getRegionInfo().getRegionNameAsString() + - " for tableName=" + Bytes.toString(tableName) + " from cache"); + " for tableName=" + tableName + " from cache"); } } @@ -1115,18 +1187,16 @@ public class HConnectionManager { * @param tableName * @return Map of cached locations for passed tableName */ - private SoftValueSortedMap getTableLocations( - final byte [] tableName) { + private SoftValueSortedMap getTableLocations( + final TableName tableName) { // find the map of cached locations for this table - Integer key = Bytes.mapKey(tableName); - SoftValueSortedMap result; + SoftValueSortedMap result; synchronized (this.cachedRegionLocations) { - result = this.cachedRegionLocations.get(key); + result = this.cachedRegionLocations.get(tableName); // if tableLocations for this table isn't built yet, make one if (result == null) { - result = new SoftValueSortedMap( - Bytes.BYTES_COMPARATOR); - this.cachedRegionLocations.put(key, result); + result = new SoftValueSortedMap(Bytes.BYTES_COMPARATOR); + this.cachedRegionLocations.put(tableName, result); } } return result; @@ -1141,23 +1211,28 @@ public class HConnectionManager { } @Override - public void clearRegionCache(final byte [] tableName) { + public void clearRegionCache(final TableName tableName) { synchronized (this.cachedRegionLocations) { - this.cachedRegionLocations.remove(Bytes.mapKey(tableName)); + this.cachedRegionLocations.remove(tableName); } } + @Override + public void clearRegionCache(final byte[] tableName) { + clearRegionCache(TableName.valueOf(tableName)); + } + /** * Put a newly discovered HRegionLocation into the cache. * @param tableName The table name. 
* @param source the source of the new location, if it's not coming from meta * @param location the new location */ - private void cacheLocation(final byte [] tableName, final HRegionLocation source, + private void cacheLocation(final TableName tableName, final HRegionLocation source, final HRegionLocation location) { boolean isFromMeta = (source == null); byte [] startKey = location.getRegionInfo().getStartKey(); - Map tableLocations = + Map tableLocations = getTableLocations(tableName); boolean isNewCacheEntry = false; boolean isStaleUpdate = false; @@ -1860,6 +1935,36 @@ public class HConnectionManager { } @Override + public ModifyNamespaceResponse modifyNamespace(RpcController controller, ModifyNamespaceRequest request) throws ServiceException { + return stub.modifyNamespace(controller, request); + } + + @Override + public CreateNamespaceResponse createNamespace(RpcController controller, CreateNamespaceRequest request) throws ServiceException { + return stub.createNamespace(controller, request); + } + + @Override + public DeleteNamespaceResponse deleteNamespace(RpcController controller, DeleteNamespaceRequest request) throws ServiceException { + return stub.deleteNamespace(controller, request); + } + + @Override + public GetNamespaceDescriptorResponse getNamespaceDescriptor(RpcController controller, GetNamespaceDescriptorRequest request) throws ServiceException { + return stub.getNamespaceDescriptor(controller, request); + } + + @Override + public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController controller, ListNamespaceDescriptorsRequest request) throws ServiceException { + return stub.listNamespaceDescriptors(controller, request); + } + + @Override + public GetTableDescriptorsByNamespaceResponse getTableDescriptorsByNamespace(RpcController controller, GetTableDescriptorsByNamespaceRequest request) throws ServiceException { + return stub.getTableDescriptorsByNamespace(controller, request); + } + + @Override public void close() { release(this.mss); } @@ -2003,8 +2108,9 @@ public class HConnectionManager { return; } synchronized (this.cachedRegionLocations) { - byte[] tableName = location.getRegionInfo().getTableName(); - Map tableLocations = getTableLocations(tableName); + TableName tableName = location.getRegionInfo().getTableName(); + Map tableLocations = + getTableLocations(tableName); if (!tableLocations.isEmpty()) { // Delete if there's something in the cache for this region. HRegionLocation removedLocation = @@ -2012,7 +2118,7 @@ public class HConnectionManager { if (LOG.isDebugEnabled() && removedLocation != null) { LOG.debug("Removed " + location.getRegionInfo().getRegionNameAsString() + - " for tableName=" + Bytes.toString(tableName) + + " for tableName=" + tableName + " from cache"); } } @@ -2027,11 +2133,11 @@ public class HConnectionManager { * @param source server that is the source of the location update. */ @Override - public void updateCachedLocations(final byte[] tableName, byte[] rowkey, + public void updateCachedLocations(final TableName tableName, byte[] rowkey, final Object exception, final HRegionLocation source) { if (rowkey == null || tableName == null) { LOG.warn("Coding error, see method javadoc. row=" + (rowkey == null ? "null" : rowkey) + - ", tableName=" + (tableName == null ? "null" : Bytes.toString(tableName))); + ", tableName=" + (tableName == null ? 
"null" : tableName)); return; } @@ -2062,9 +2168,15 @@ public class HConnectionManager { } @Override + public void updateCachedLocations(final byte[] tableName, byte[] rowkey, + final Object exception, final HRegionLocation source) { + updateCachedLocations(TableName.valueOf(tableName), rowkey, exception, source); + } + + @Override @Deprecated public void processBatch(List list, - final byte[] tableName, + final TableName tableName, ExecutorService pool, Object[] results) throws IOException, InterruptedException { // This belongs in HTable!!! Not in here. St.Ack @@ -2077,6 +2189,15 @@ public class HConnectionManager { processBatchCallback(list, tableName, pool, results, null); } + @Override + @Deprecated + public void processBatch(List list, + final byte[] tableName, + ExecutorService pool, + Object[] results) throws IOException, InterruptedException { + processBatch(list, TableName.valueOf(tableName), pool, results); + } + /** * Send the queries in parallel on the different region servers. Retries on failures. * If the method returns it means that there is no error, and the 'results' array will @@ -2088,7 +2209,7 @@ public class HConnectionManager { @Deprecated public void processBatchCallback( List list, - byte[] tableName, + TableName tableName, ExecutorService pool, Object[] results, Batch.Callback callback) @@ -2108,8 +2229,20 @@ public class HConnectionManager { } } + @Override + @Deprecated + public void processBatchCallback( + List list, + byte[] tableName, + ExecutorService pool, + Object[] results, + Batch.Callback callback) + throws IOException, InterruptedException { + processBatchCallback(list, TableName.valueOf(tableName), pool, results, callback); + } + // For tests. - protected AsyncProcess createAsyncProcess(byte[] tableName, ExecutorService pool, + protected AsyncProcess createAsyncProcess(TableName tableName, ExecutorService pool, AsyncProcess.AsyncProcessCallback callback, Configuration conf) { return new AsyncProcess(this, tableName, pool, callback, conf, RpcRetryingCallerFactory.instantiate(conf)); @@ -2158,10 +2291,9 @@ public class HConnectionManager { * Return the number of cached region for a table. It will only be called * from a unit test. */ - int getNumberOfCachedRegionLocations(final byte[] tableName) { - Integer key = Bytes.mapKey(tableName); + int getNumberOfCachedRegionLocations(final TableName tableName) { synchronized (this.cachedRegionLocations) { - Map tableLocs = this.cachedRegionLocations.get(key); + Map tableLocs = this.cachedRegionLocations.get(tableName); if (tableLocs == null) { return 0; } @@ -2176,25 +2308,36 @@ public class HConnectionManager { * @param row row * @return Region cached or not. 
*/ - boolean isRegionCached(final byte[] tableName, final byte[] row) { + boolean isRegionCached(TableName tableName, final byte[] row) { HRegionLocation location = getCachedLocation(tableName, row); return location != null; } @Override - public void setRegionCachePrefetch(final byte[] tableName, + public void setRegionCachePrefetch(final TableName tableName, final boolean enable) { if (!enable) { - regionCachePrefetchDisabledTables.add(Bytes.mapKey(tableName)); + regionCachePrefetchDisabledTables.add(Bytes.mapKey(tableName.getName())); } else { - regionCachePrefetchDisabledTables.remove(Bytes.mapKey(tableName)); + regionCachePrefetchDisabledTables.remove(Bytes.mapKey(tableName.getName())); } } @Override - public boolean getRegionCachePrefetch(final byte[] tableName) { - return !regionCachePrefetchDisabledTables.contains(Bytes.mapKey(tableName)); + public void setRegionCachePrefetch(final byte[] tableName, + final boolean enable) { + setRegionCachePrefetch(TableName.valueOf(tableName), enable); + } + + @Override + public boolean getRegionCachePrefetch(TableName tableName) { + return !regionCachePrefetchDisabledTables.contains(Bytes.mapKey(tableName.getName())); + } + + @Override + public boolean getRegionCachePrefetch(byte[] tableName) { + return getRegionCachePrefetch(TableName.valueOf(tableName)); } @Override @@ -2312,7 +2455,7 @@ public class HConnectionManager { MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitorService(); try { GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest((List)null); + RequestConverter.buildGetTableDescriptorsRequest((List)null); return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req)); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); @@ -2322,7 +2465,8 @@ public class HConnectionManager { } @Override - public HTableDescriptor[] getHTableDescriptors(List tableNames) throws IOException { + public HTableDescriptor[] getHTableDescriptorsByTableName( + List tableNames) throws IOException { if (tableNames == null || tableNames.isEmpty()) return new HTableDescriptor[0]; MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitorService(); try { @@ -2336,6 +2480,17 @@ public class HConnectionManager { } } + @Override + public HTableDescriptor[] getHTableDescriptors( + List names) throws IOException { + List tableNames = new ArrayList(names.size()); + for(String name : names) { + tableNames.add(TableName.valueOf(name)); + } + + return getHTableDescriptorsByTableName(tableNames); + } + /** * Connects to the master to get the table descriptor. * @param tableName table name @@ -2344,10 +2499,10 @@ public class HConnectionManager { * is not found. 
*/ @Override - public HTableDescriptor getHTableDescriptor(final byte[] tableName) + public HTableDescriptor getHTableDescriptor(final TableName tableName) throws IOException { - if (tableName == null || tableName.length == 0) return null; - if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { + if (tableName == null) return null; + if (tableName.equals(TableName.META_TABLE_NAME)) { return HTableDescriptor.META_TABLEDESC; } MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitorService(); @@ -2364,7 +2519,13 @@ public class HConnectionManager { if (!htds.getTableSchemaList().isEmpty()) { return HTableDescriptor.convert(htds.getTableSchemaList().get(0)); } - throw new TableNotFoundException(Bytes.toString(tableName)); + throw new TableNotFoundException(tableName.getNameAsString()); + } + + @Override + public HTableDescriptor getHTableDescriptor(final byte[] tableName) + throws IOException { + return getHTableDescriptor(TableName.valueOf(tableName)); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionWrapper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionWrapper.java index 1ff4fcd..c9954f8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionWrapper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionWrapper.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; import java.util.List; import java.util.concurrent.ExecutorService; @@ -28,7 +29,9 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; @@ -86,88 +89,146 @@ public class HConnectionWrapper implements HConnection { } @Override - public boolean isTableEnabled(byte[] tableName) throws IOException { + public boolean isTableEnabled(TableName tableName) throws IOException { return hconnection.isTableEnabled(tableName); } @Override - public boolean isTableDisabled(byte[] tableName) throws IOException { + public boolean isTableEnabled(byte[] tableName) throws IOException { + return isTableEnabled(TableName.valueOf(tableName)); + } + + @Override + public boolean isTableDisabled(TableName tableName) throws IOException { return hconnection.isTableDisabled(tableName); } @Override - public boolean isTableAvailable(byte[] tableName) throws IOException { + public boolean isTableDisabled(byte[] tableName) throws IOException { + return isTableDisabled(TableName.valueOf(tableName)); + } + + @Override + public boolean isTableAvailable(TableName tableName) throws IOException { return hconnection.isTableAvailable(tableName); } @Override - public boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) - throws IOException { + public boolean isTableAvailable(byte[] tableName) throws IOException { + return isTableAvailable(TableName.valueOf(tableName)); + } + + @Override + public boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException { return 
hconnection.isTableAvailable(tableName, splitKeys); } @Override + public boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws IOException { + return isTableAvailable(TableName.valueOf(tableName), splitKeys); + } + + @Override public HTableDescriptor[] listTables() throws IOException { return hconnection.listTables(); } @Override - public HTableDescriptor getHTableDescriptor(byte[] tableName) - throws IOException { + public HTableDescriptor getHTableDescriptor(TableName tableName) throws IOException { return hconnection.getHTableDescriptor(tableName); } @Override - public HRegionLocation locateRegion(byte[] tableName, byte[] row) - throws IOException { + public HTableDescriptor getHTableDescriptor(byte[] tableName) throws IOException { + return getHTableDescriptor(TableName.valueOf(tableName)); + } + + @Override + public HRegionLocation locateRegion(TableName tableName, byte[] row) throws IOException { return hconnection.locateRegion(tableName, row); } @Override + public HRegionLocation locateRegion(byte[] tableName, byte[] row) throws IOException { + return locateRegion(TableName.valueOf(tableName), row); + } + + @Override public void clearRegionCache() { hconnection.clearRegionCache(); } @Override - public void clearRegionCache(byte[] tableName) { + public void clearRegionCache(TableName tableName) { hconnection.clearRegionCache(tableName); } @Override + public void clearRegionCache(byte[] tableName) { + clearRegionCache(TableName.valueOf(tableName)); + } + + @Override public void deleteCachedRegionLocation(HRegionLocation location) { hconnection.deleteCachedRegionLocation(location); } @Override - public HRegionLocation relocateRegion(byte[] tableName, byte[] row) - throws IOException { + public HRegionLocation relocateRegion(TableName tableName, byte[] row) throws IOException { return hconnection.relocateRegion(tableName, row); } @Override - public void updateCachedLocations(byte[] tableName, byte[] rowkey, - Object exception, HRegionLocation source) { + public HRegionLocation relocateRegion(byte[] tableName, byte[] row) throws IOException { + return relocateRegion(TableName.valueOf(tableName), row); + } + + @Override + public void updateCachedLocations(TableName tableName, + byte[] rowkey, + Object exception, + HRegionLocation source) { hconnection.updateCachedLocations(tableName, rowkey, exception, source); } @Override + public void updateCachedLocations(byte[] tableName, + byte[] rowkey, + Object exception, + HRegionLocation source) { + updateCachedLocations(TableName.valueOf(tableName), rowkey, exception, source); + } + + @Override public HRegionLocation locateRegion(byte[] regionName) throws IOException { return hconnection.locateRegion(regionName); } @Override - public List locateRegions(byte[] tableName) - throws IOException { + public List locateRegions(TableName tableName) throws IOException { return hconnection.locateRegions(tableName); } @Override - public List locateRegions(byte[] tableName, - boolean useCache, boolean offlined) throws IOException { + public List locateRegions(byte[] tableName) throws IOException { + return locateRegions(TableName.valueOf(tableName)); + } + + @Override + public List locateRegions(TableName tableName, + boolean useCache, + boolean offlined) throws IOException { return hconnection.locateRegions(tableName, useCache, offlined); } @Override + public List locateRegions(byte[] tableName, + boolean useCache, + boolean offlined) throws IOException { + return locateRegions(TableName.valueOf(tableName), useCache, offlined); + } + + @Override public
MasterAdminService.BlockingInterface getMasterAdmin() throws IOException { return hconnection.getMasterAdmin(); } @@ -217,44 +278,86 @@ public class HConnectionWrapper implements HConnection { } @Override - public HRegionLocation getRegionLocation(byte[] tableName, byte[] row, - boolean reload) throws IOException { + public HRegionLocation getRegionLocation(TableName tableName, + byte[] row, boolean reload) throws IOException { return hconnection.getRegionLocation(tableName, row, reload); } @Override - public void processBatch(List actions, byte[] tableName, - ExecutorService pool, Object[] results) throws IOException, - InterruptedException { + public HRegionLocation getRegionLocation(byte[] tableName, + byte[] row, boolean reload) throws IOException { + return getRegionLocation(TableName.valueOf(tableName), row, reload); + } + + @Override + public void processBatch(List actions, TableName tableName, ExecutorService pool, + Object[] results) throws IOException, InterruptedException { hconnection.processBatch(actions, tableName, pool, results); } @Override - public void processBatchCallback(List list, - byte[] tableName, ExecutorService pool, Object[] results, - Callback callback) throws IOException, InterruptedException { + public void processBatch(List actions, byte[] tableName, ExecutorService pool, + Object[] results) throws IOException, InterruptedException { + processBatch(actions, TableName.valueOf(tableName), pool, results); + } + + @Override + public void processBatchCallback(List list, TableName tableName, + ExecutorService pool, + Object[] results, + Callback callback) + throws IOException, InterruptedException { hconnection.processBatchCallback(list, tableName, pool, results, callback); } @Override - public void setRegionCachePrefetch(byte[] tableName, boolean enable) { + public void processBatchCallback(List list, byte[] tableName, + ExecutorService pool, + Object[] results, + Callback callback) + throws IOException, InterruptedException { + processBatchCallback(list, TableName.valueOf(tableName), pool, results, callback); + } + + @Override + public void setRegionCachePrefetch(TableName tableName, boolean enable) { hconnection.setRegionCachePrefetch(tableName, enable); } @Override - public boolean getRegionCachePrefetch(byte[] tableName) { + public void setRegionCachePrefetch(byte[] tableName, boolean enable) { + setRegionCachePrefetch(TableName.valueOf(tableName), enable); + } + + @Override + public boolean getRegionCachePrefetch(TableName tableName) { return hconnection.getRegionCachePrefetch(tableName); } @Override + public boolean getRegionCachePrefetch(byte[] tableName) { + return getRegionCachePrefetch(TableName.valueOf(tableName)); + } + + @Override public int getCurrentNrHRS() throws IOException { return hconnection.getCurrentNrHRS(); } @Override - public HTableDescriptor[] getHTableDescriptors(List tableNames) - throws IOException { - return hconnection.getHTableDescriptors(tableNames); + public HTableDescriptor[] getHTableDescriptorsByTableName( + List tableNames) throws IOException { + return hconnection.getHTableDescriptorsByTableName(tableNames); + } + + @Override + public HTableDescriptor[] getHTableDescriptors( + List names) throws IOException { + List tableNames = new ArrayList(names.size()); + for(String name : names) { + tableNames.add(TableName.valueOf(name)); + } + return getHTableDescriptorsByTableName(tableNames); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index b53416d..9b31a4c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -26,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -51,7 +52,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hbase.util.Threads; import java.io.Closeable; @@ -119,7 +119,7 @@ import java.util.concurrent.TimeUnit; public class HTable implements HTableInterface { private static final Log LOG = LogFactory.getLog(HTable.class); protected HConnection connection; - private final byte [] tableName; + private final TableName tableName; private volatile Configuration configuration; protected List writeAsyncBuffer = new LinkedList(); private long writeBufferSize; @@ -150,10 +150,9 @@ public class HTable implements HTableInterface { */ public HTable(Configuration conf, final String tableName) throws IOException { - this(conf, Bytes.toBytes(tableName)); + this(conf, TableName.valueOf(tableName)); } - /** * Creates an object to access a HBase table. * Shares zookeeper connection and other resources with other HTable instances @@ -164,7 +163,24 @@ public class HTable implements HTableInterface { * @param tableName Name of the table. * @throws IOException if a remote or network exception occurs */ - public HTable(Configuration conf, final byte [] tableName) + public HTable(Configuration conf, final byte[] tableName) + throws IOException { + this(conf, TableName.valueOf(tableName)); + } + + + + /** + * Creates an object to access a HBase table. + * Shares zookeeper connection and other resources with other HTable instances + * created with the same conf instance. Uses already-populated + * region cache if one is available, populated by any other HTable instances + * sharing this conf instance. Recommended. + * @param conf Configuration object to use. + * @param tableName table name pojo + * @throws IOException if a remote or network exception occurs + */ + public HTable(Configuration conf, final TableName tableName) throws IOException { this.tableName = tableName; this.cleanupPoolOnClose = this.cleanupConnectionOnClose = true; @@ -206,6 +222,23 @@ public class HTable implements HTableInterface { */ public HTable(Configuration conf, final byte[] tableName, final ExecutorService pool) throws IOException { + this(conf, TableName.valueOf(tableName), pool); + } + + /** + * Creates an object to access a HBase table. + * Shares zookeeper connection and other resources with other HTable instances + * created with the same conf instance. Uses already-populated + * region cache if one is available, populated by any other HTable instances + * sharing this conf instance. + * Use this constructor when the ExecutorService is externally managed. + * @param conf Configuration object to use. + * @param tableName Name of the table. 
+ * @param pool ExecutorService to be used. + * @throws IOException if a remote or network exception occurs + */ + public HTable(Configuration conf, final TableName tableName, final ExecutorService pool) + throws IOException { this.connection = HConnectionManager.getConnection(conf); this.configuration = conf; this.pool = pool; @@ -229,6 +262,22 @@ public class HTable implements HTableInterface { */ public HTable(final byte[] tableName, final HConnection connection, final ExecutorService pool) throws IOException { + this(TableName.valueOf(tableName), connection, pool); + } + + /** + * Creates an object to access a HBase table. + * Shares zookeeper connection and other resources with other HTable instances + * created with the same connection instance. + * Use this constructor when the ExecutorService and HConnection instance are + * externally managed. + * @param tableName Name of the table. + * @param connection HConnection to be used. + * @param pool ExecutorService to be used. + * @throws IOException if a remote or network exception occurs + */ + public HTable(TableName tableName, final HConnection connection, + final ExecutorService pool) throws IOException { if (connection == null || connection.isClosed()) { throw new IllegalArgumentException("Connection is null or closed."); } @@ -245,7 +294,7 @@ public class HTable implements HTableInterface { * For internal testing. */ protected HTable(){ - tableName = new byte[]{}; + tableName = null; cleanupPoolOnClose = false; cleanupConnectionOnClose = false; } @@ -255,7 +304,7 @@ public class HTable implements HTableInterface { */ private void finishSetup() throws IOException { this.connection.locateRegion(tableName, HConstants.EMPTY_START_ROW); - this.operationTimeout = HTableDescriptor.isMetaTable(tableName) ? + this.operationTimeout = HTableDescriptor.isSystemTable(tableName) ? this.configuration.getInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT): this.configuration.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, @@ -299,7 +348,7 @@ public class HTable implements HTableInterface { */ @Deprecated public static boolean isTableEnabled(String tableName) throws IOException { - return isTableEnabled(Bytes.toBytes(tableName)); + return isTableEnabled(TableName.valueOf(tableName)); } /** @@ -309,10 +358,24 @@ public class HTable implements HTableInterface { * @param tableName Name of table to check. * @return {@code true} if table is online. * @throws IOException if a remote or network exception occurs - * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])} + * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])} */ @Deprecated public static boolean isTableEnabled(byte[] tableName) throws IOException { + return isTableEnabled(TableName.valueOf(tableName)); + } + + /** + * Tells whether or not a table is enabled or not. This method creates a + * new HBase configuration, so it might make your unit tests fail due to + * incorrect ZK client port. + * @param tableName Name of table to check. + * @return {@code true} if table is online. 
+ * @throws IOException if a remote or network exception occurs + * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])} + */ + @Deprecated + public static boolean isTableEnabled(TableName tableName) throws IOException { return isTableEnabled(HBaseConfiguration.create(), tableName); } @@ -327,7 +390,7 @@ public class HTable implements HTableInterface { @Deprecated public static boolean isTableEnabled(Configuration conf, String tableName) throws IOException { - return isTableEnabled(conf, Bytes.toBytes(tableName)); + return isTableEnabled(conf, TableName.valueOf(tableName)); } /** @@ -336,11 +399,25 @@ public class HTable implements HTableInterface { * @param tableName Name of table to check. * @return {@code true} if table is online. * @throws IOException if a remote or network exception occurs - * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[] tableName)} + * @deprecated use {@link HBaseAdmin#isTableEnabled(byte[])} + */ + @Deprecated + public static boolean isTableEnabled(Configuration conf, byte[] tableName) + throws IOException { + return isTableEnabled(conf, TableName.valueOf(tableName)); + } + + /** + * Tells whether or not a table is enabled or not. + * @param conf The Configuration object to use. + * @param tableName Name of table to check. + * @return {@code true} if table is online. + * @throws IOException if a remote or network exception occurs + * @deprecated use {@link HBaseAdmin#isTableEnabled(org.apache.hadoop.hbase.TableName tableName)} */ @Deprecated public static boolean isTableEnabled(Configuration conf, - final byte[] tableName) throws IOException { + final TableName tableName) throws IOException { return HConnectionManager.execute(new HConnectable(conf) { @Override public Boolean connect(HConnection connection) throws IOException { @@ -388,7 +465,12 @@ public class HTable implements HTableInterface { */ @Override public byte [] getTableName() { - return this.tableName; + return this.tableName.getName(); + } + + @Override + public TableName getName() { + return tableName; } /** @@ -502,7 +584,7 @@ public class HTable implements HTableInterface { */ public NavigableMap getRegionLocations() throws IOException { // TODO: Odd that this returns a Map of HRI to SN whereas getRegionLocation, singular, returns an HRegionLocation. 
- return MetaScanner.allTableRegions(getConfiguration(), this.connection, getTableName(), false); + return MetaScanner.allTableRegions(getConfiguration(), this.connection, getName(), false); } /** @@ -611,7 +693,8 @@ public class HTable implements HTableInterface { if (scan.getCaching() <= 0) { scan.setCaching(getScannerCaching()); } - return new ClientScanner(getConfiguration(), scan, getTableName(), this.connection); + return new ClientScanner(getConfiguration(), scan, + getName(), this.connection); } /** @@ -641,7 +724,7 @@ public class HTable implements HTableInterface { @Override public Result get(final Get get) throws IOException { RegionServerCallable callable = new RegionServerCallable(this.connection, - getTableName(), get.getRow()) { + getName(), get.getRow()) { public Result call() throws IOException { return ProtobufUtil.get(getStub(), getLocation().getRegionInfo().getRegionName(), get); } @@ -813,7 +896,7 @@ public class HTable implements HTableInterface { if (synchronous || ap.hasError()) { if (ap.hasError() && LOG.isDebugEnabled()) { - LOG.debug(Bytes.toString(tableName) + ": One or more of the operations have failed -" + + LOG.debug(tableName + ": One or more of the operations have failed -" + " waiting for all operation in progress to finish (successfully or not)"); } ap.waitUntilDone(); @@ -845,7 +928,7 @@ public class HTable implements HTableInterface { @Override public void mutateRow(final RowMutations rm) throws IOException { RegionServerCallable callable = - new RegionServerCallable(connection, getTableName(), rm.getRow()) { + new RegionServerCallable(connection, getName(), rm.getRow()) { public Void call() throws IOException { try { MultiRequest request = RequestConverter.buildMultiRequest( @@ -870,7 +953,7 @@ public class HTable implements HTableInterface { "Invalid arguments to append, no columns specified"); } RegionServerCallable callable = - new RegionServerCallable(this.connection, getTableName(), append.getRow()) { + new RegionServerCallable(this.connection, getName(), append.getRow()) { public Result call() throws IOException { try { MutateRequest request = RequestConverter.buildMutateRequest( @@ -897,7 +980,7 @@ public class HTable implements HTableInterface { "Invalid arguments to increment, no columns specified"); } RegionServerCallable callable = new RegionServerCallable(this.connection, - getTableName(), increment.getRow()) { + getName(), increment.getRow()) { public Result call() throws IOException { try { MutateRequest request = RequestConverter.buildMutateRequest( @@ -944,7 +1027,7 @@ public class HTable implements HTableInterface { } RegionServerCallable callable = - new RegionServerCallable(connection, getTableName(), row) { + new RegionServerCallable(connection, getName(), row) { public Long call() throws IOException { try { MutateRequest request = RequestConverter.buildMutateRequest( @@ -972,7 +1055,7 @@ public class HTable implements HTableInterface { final Put put) throws IOException { RegionServerCallable callable = - new RegionServerCallable(connection, getTableName(), row) { + new RegionServerCallable(connection, getName(), row) { public Boolean call() throws IOException { try { MutateRequest request = RequestConverter.buildMutateRequest( @@ -998,7 +1081,7 @@ public class HTable implements HTableInterface { final Delete delete) throws IOException { RegionServerCallable callable = - new RegionServerCallable(connection, getTableName(), row) { + new RegionServerCallable(connection, getName(), row) { public Boolean call() throws IOException { try 
{ MutateRequest request = RequestConverter.buildMutateRequest( @@ -1020,7 +1103,7 @@ public class HTable implements HTableInterface { @Override public boolean exists(final Get get) throws IOException { RegionServerCallable callable = - new RegionServerCallable(connection, getTableName(), get.getRow()) { + new RegionServerCallable(connection, getName(), get.getRow()) { public Boolean call() throws IOException { try { GetRequest request = RequestConverter.buildGetRequest( @@ -1124,7 +1207,7 @@ public class HTable implements HTableInterface { Callable> callable = new Callable>() { public List call() throws Exception { RegionServerCallable> callable = - new RegionServerCallable>(connection, getTableName(), + new RegionServerCallable>(connection, getName(), getsByRegionEntry.getValue().get(0).getRow()) { public List call() throws IOException { try { @@ -1139,7 +1222,7 @@ public class HTable implements HTableInterface { } }; return rpcCallerFactory.> newCaller().callWithRetries(callable, - operationTimeout); + operationTimeout); } }; futures.put(getsByRegionEntry.getKey(), pool.submit(callable)); @@ -1352,6 +1435,12 @@ public class HTable implements HTableInterface { */ public static void setRegionCachePrefetch(final byte[] tableName, final boolean enable) throws IOException { + setRegionCachePrefetch(TableName.valueOf(tableName), enable); + } + + public static void setRegionCachePrefetch( + final TableName tableName, + final boolean enable) throws IOException { HConnectionManager.execute(new HConnectable(HBaseConfiguration .create()) { @Override @@ -1374,6 +1463,12 @@ public class HTable implements HTableInterface { */ public static void setRegionCachePrefetch(final Configuration conf, final byte[] tableName, final boolean enable) throws IOException { + setRegionCachePrefetch(conf, TableName.valueOf(tableName), enable); + } + + public static void setRegionCachePrefetch(final Configuration conf, + final TableName tableName, + final boolean enable) throws IOException { HConnectionManager.execute(new HConnectable(conf) { @Override public Void connect(HConnection connection) throws IOException { @@ -1393,6 +1488,11 @@ public class HTable implements HTableInterface { */ public static boolean getRegionCachePrefetch(final Configuration conf, final byte[] tableName) throws IOException { + return getRegionCachePrefetch(conf, TableName.valueOf(tableName)); + } + + public static boolean getRegionCachePrefetch(final Configuration conf, + final TableName tableName) throws IOException { return HConnectionManager.execute(new HConnectable(conf) { @Override public Boolean connect(HConnection connection) throws IOException { @@ -1409,6 +1509,11 @@ public class HTable implements HTableInterface { * @throws IOException */ public static boolean getRegionCachePrefetch(final byte[] tableName) throws IOException { + return getRegionCachePrefetch(TableName.valueOf(tableName)); + } + + public static boolean getRegionCachePrefetch( + final TableName tableName) throws IOException { return HConnectionManager.execute(new HConnectable( HBaseConfiguration.create()) { @Override @@ -1416,7 +1521,7 @@ public class HTable implements HTableInterface { return connection.getRegionCachePrefetch(tableName); } }); - } + } /** * Explicitly clears the region cache to fetch the latest value from META. 
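The HTable hunks above all follow the same shape: the String and byte[] constructors convert through TableName.valueOf(...) and forward to the new TableName-based constructor, and internal call sites switch from getTableName() to getName(). The following is a minimal usage sketch of the resulting client API, not part of the patch; it assumes a cluster reachable from the default client configuration, and the table name "t1" and row key "row1" are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class TableNameUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Preferred after this change: build the table from a TableName pojo.
    TableName name = TableName.valueOf("t1");
    HTable table = new HTable(conf, name);
    try {
      Result r = table.get(new Get(Bytes.toBytes("row1")));
      // getName() returns the TableName pojo; getTableName() still returns the raw bytes.
      System.out.println(table.getName() + " returned " + r.size() + " cells");
    } finally {
      table.close();
    }

    // Legacy call sites keep compiling: the byte[] constructor simply delegates
    // through TableName.valueOf(...) to the constructor used above.
    new HTable(conf, Bytes.toBytes("t1")).close();
  }
}
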
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java index 889f50b..a15b186 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java @@ -23,6 +23,7 @@ import com.google.protobuf.ServiceException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.coprocessor.Batch; @@ -50,6 +51,11 @@ public interface HTableInterface extends Closeable { byte[] getTableName(); /** + * Gets the fully qualified table name instance of this table. + */ + TableName getName(); + + /** * Returns the {@link Configuration} object used by this instance. *

* The reference returned is not a copy, so any change made to it will diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java index 35240f3..946bd85 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java @@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ZooKeeperConnectionException; @@ -67,7 +68,7 @@ public class HTableMultiplexer { static final String TABLE_MULTIPLEXER_FLUSH_FREQ_MS = "hbase.tablemultiplexer.flush.frequency.ms"; - private Map tableNameToHTableMap; + private Map tableNameToHTableMap; /** The map between each region server to its corresponding buffer queue */ private Map> @@ -92,7 +93,7 @@ public class HTableMultiplexer { this.serverToBufferQueueMap = new ConcurrentHashMap>(); this.serverToFlushWorkerMap = new ConcurrentHashMap(); - this.tableNameToHTableMap = new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR); + this.tableNameToHTableMap = new ConcurrentSkipListMap(); this.retryNum = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); this.perRegionServerBufferQueueSize = perRegionServerBufferQueueSize; @@ -101,24 +102,28 @@ public class HTableMultiplexer { /** * The put request will be buffered by its corresponding buffer queue. Return false if the queue * is already full. - * @param table + * @param tableName * @param put * @return true if the request can be accepted by its corresponding buffer queue. * @throws IOException */ - public boolean put(final byte[] table, final Put put) throws IOException { - return put(table, put, this.retryNum); + public boolean put(TableName tableName, final Put put) throws IOException { + return put(tableName, put, this.retryNum); + } + + public boolean put(byte[] tableName, final Put put) throws IOException { + return put(TableName.valueOf(tableName), put); } /** * The puts request will be buffered by their corresponding buffer queue. * Return the list of puts which could not be queued. - * @param table + * @param tableName * @param puts * @return the list of puts which could not be queued * @throws IOException */ - public List put(final byte[] table, final List puts) + public List put(TableName tableName, final List puts) throws IOException { if (puts == null) return null; @@ -126,7 +131,7 @@ public class HTableMultiplexer { List failedPuts = null; boolean result; for (Put put : puts) { - result = put(table, put, this.retryNum); + result = put(tableName, put, this.retryNum); if (result == false) { // Create the failed puts list if necessary @@ -140,24 +145,29 @@ public class HTableMultiplexer { return failedPuts; } + public List put(byte[] tableName, final List puts) throws IOException { + return put(TableName.valueOf(tableName), puts); + } + + /** * The put request will be buffered by its corresponding buffer queue. And the put request will be * retried before dropping the request. * Return false if the queue is already full. 
- * @param table + * @param tableName * @param put * @param retry * @return true if the request can be accepted by its corresponding buffer queue. * @throws IOException */ - public boolean put(final byte[] table, final Put put, int retry) + public boolean put(final TableName tableName, final Put put, int retry) throws IOException { if (retry <= 0) { return false; } LinkedBlockingQueue queue; - HTable htable = getHTable(table); + HTable htable = getHTable(tableName); try { htable.validatePut(put); HRegionLocation loc = htable.getRegionLocation(put.getRow(), false); @@ -175,6 +185,11 @@ public class HTableMultiplexer { return false; } + public boolean put(final byte[] tableName, final Put put, int retry) + throws IOException { + return put(TableName.valueOf(tableName), put, retry); + } + /** * @return the current HTableMultiplexerStatus */ @@ -183,14 +198,14 @@ public class HTableMultiplexer { } - private HTable getHTable(final byte[] table) throws IOException { - HTable htable = this.tableNameToHTableMap.get(table); + private HTable getHTable(TableName tableName) throws IOException { + HTable htable = this.tableNameToHTableMap.get(tableName); if (htable == null) { synchronized (this.tableNameToHTableMap) { - htable = this.tableNameToHTableMap.get(table); + htable = this.tableNameToHTableMap.get(tableName); if (htable == null) { - htable = new HTable(conf, table); - this.tableNameToHTableMap.put(table, htable); + htable = new HTable(conf, tableName); + this.tableNameToHTableMap.put(tableName, htable); } } } @@ -435,7 +450,7 @@ public class HTableMultiplexer { HRegionLocation oldLoc) throws IOException { Put failedPut = failedPutStatus.getPut(); // The currentPut is failed. So get the table name for the currentPut. - byte[] tableName = failedPutStatus.getRegionInfo().getTableName(); + TableName tableName = failedPutStatus.getRegionInfo().getTableName(); // Decrease the retry count int retryCount = failedPutStatus.getRetryCount() - 1; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java index 762e3eb..a641ec6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java @@ -23,6 +23,7 @@ import com.google.protobuf.ServiceException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.client.coprocessor.Batch; @@ -340,6 +341,11 @@ public class HTablePool implements Closeable { } @Override + public TableName getName() { + return table.getName(); + } + + @Override public Configuration getConfiguration() { checkState(); return table.getConfiguration(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java index cd7e78e..d53165e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -30,6 +30,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; 
+import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; @@ -76,10 +77,10 @@ public class MetaScanner { * @throws IOException e */ public static void metaScan(Configuration configuration, HConnection connection, - MetaScannerVisitor visitor, byte [] userTableName) + MetaScannerVisitor visitor, TableName userTableName) throws IOException { metaScan(configuration, connection, visitor, userTableName, null, Integer.MAX_VALUE, - HConstants.META_TABLE_NAME); + TableName.META_TABLE_NAME); } /** @@ -98,11 +99,11 @@ public class MetaScanner { * @throws IOException e */ public static void metaScan(Configuration configuration, - MetaScannerVisitor visitor, byte [] userTableName, byte[] row, + MetaScannerVisitor visitor, TableName userTableName, byte[] row, int rowLimit) throws IOException { metaScan(configuration, null, visitor, userTableName, row, rowLimit, - HConstants.META_TABLE_NAME); + TableName.META_TABLE_NAME); } /** @@ -123,15 +124,15 @@ public class MetaScanner { * @throws IOException e */ public static void metaScan(Configuration configuration, HConnection connection, - final MetaScannerVisitor visitor, final byte[] tableName, - final byte[] row, final int rowLimit, final byte[] metaTableName) + final MetaScannerVisitor visitor, final TableName tableName, + final byte[] row, final int rowLimit, final TableName metaTableName) throws IOException { int rowUpperLimit = rowLimit > 0 ? rowLimit: Integer.MAX_VALUE; HTable metaTable; if (connection == null) { - metaTable = new HTable(configuration, HConstants.META_TABLE_NAME, null); + metaTable = new HTable(configuration, TableName.META_TABLE_NAME, null); } else { - metaTable = new HTable(HConstants.META_TABLE_NAME, connection, null); + metaTable = new HTable(TableName.META_TABLE_NAME, connection, null); } // Calculate startrow for scan. byte[] startRow; @@ -142,17 +143,18 @@ public class MetaScanner { byte[] searchRow = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false); Result startRowResult = metaTable.getRowOrBefore(searchRow, HConstants.CATALOG_FAMILY); if (startRowResult == null) { - throw new TableNotFoundException("Cannot find row in .META. 
for table: " + - Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow)); + throw new TableNotFoundException("Cannot find row in "+ TableName + .META_TABLE_NAME.getNameAsString()+" for table: " + + tableName + ", row=" + Bytes.toStringBinary(searchRow)); } HRegionInfo regionInfo = getHRegionInfo(startRowResult); if (regionInfo == null) { throw new IOException("HRegionInfo was null or empty in Meta for " + - Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow)); + tableName + ", row=" + Bytes.toStringBinary(searchRow)); } byte[] rowBefore = regionInfo.getStartKey(); startRow = HRegionInfo.createRegionName(tableName, rowBefore, HConstants.ZEROES, false); - } else if (tableName == null || tableName.length == 0) { + } else if (tableName == null || tableName.getName().length == 0) { // Full META scan startRow = HConstants.EMPTY_START_ROW; } else { @@ -165,7 +167,7 @@ public class MetaScanner { HConstants.DEFAULT_HBASE_META_SCANNER_CACHING)); scan.setCaching(rows); if (LOG.isTraceEnabled()) { - LOG.trace("Scanning " + Bytes.toString(metaTableName) + " starting at row=" + + LOG.trace("Scanning " + metaTableName.getNameAsString() + " starting at row=" + Bytes.toStringBinary(startRow) + " for max=" + rowUpperLimit + " with caching=" + rows); } // Run the scan @@ -267,11 +269,11 @@ public class MetaScanner { * @throws IOException */ public static NavigableMap allTableRegions(Configuration conf, - HConnection connection, - final byte [] tablename, final boolean offlined) throws IOException { + HConnection connection, final TableName tableName, + final boolean offlined) throws IOException { final NavigableMap regions = new TreeMap(); - MetaScannerVisitor visitor = new TableMetaScannerVisitor(tablename) { + MetaScannerVisitor visitor = new TableMetaScannerVisitor(tableName) { @Override public boolean processRowInternal(Result rowResult) throws IOException { HRegionInfo info = getHRegionInfo(rowResult); @@ -280,7 +282,7 @@ public class MetaScanner { return true; } }; - metaScan(conf, connection, visitor, tablename); + metaScan(conf, connection, visitor, tableName); return regions; } @@ -340,9 +342,9 @@ public class MetaScanner { * META entries for daughters are available during splits. 
*/ public static abstract class TableMetaScannerVisitor extends DefaultMetaScannerVisitor { - private byte[] tableName; + private TableName tableName; - public TableMetaScannerVisitor(byte[] tableName) { + public TableMetaScannerVisitor(TableName tableName) { super(); this.tableName = tableName; } @@ -353,7 +355,7 @@ public class MetaScanner { if (info == null) { return true; } - if (!(Bytes.equals(info.getTableName(), tableName))) { + if (!(info.getTableName().equals(tableName))) { return false; } return super.processRow(rowResult); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java index 8505c1e..67b3aff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java @@ -23,6 +23,7 @@ import java.util.List; import java.util.Map; import org.apache.hadoop.hbase.CellScannable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -42,7 +43,7 @@ import com.google.protobuf.ServiceException; class MultiServerCallable extends RegionServerCallable { private final MultiAction multi; - MultiServerCallable(final HConnection connection, final byte [] tableName, + MultiServerCallable(final HConnection connection, final TableName tableName, final HRegionLocation location, final MultiAction multi) { super(connection, tableName, null); this.multi = multi; @@ -120,4 +121,4 @@ class MultiServerCallable extends RegionServerCallable { // Use the location we were given in the constructor rather than go look it up. setStub(getConnection().getClient(getLocation().getServerName())); } -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java index df067e5..9383b9c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java @@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.RegionMovedException; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.util.Bytes; @@ -44,7 +45,7 @@ public abstract class RegionServerCallable implements RetryingCallable { // Public because used outside of this package over in ipc. static final Log LOG = LogFactory.getLog(RegionServerCallable.class); private final HConnection connection; - private final byte [] tableName; + private final TableName tableName; private final byte [] row; private HRegionLocation location; private ClientService.BlockingInterface stub; @@ -56,7 +57,7 @@ public abstract class RegionServerCallable implements RetryingCallable { * @param tableName Table name to which row belongs. * @param row The row we want in tableName. 
*/ - public RegionServerCallable(HConnection connection, byte [] tableName, byte [] row) { + public RegionServerCallable(HConnection connection, TableName tableName, byte [] row) { this.connection = connection; this.tableName = tableName; this.row = row; @@ -71,7 +72,7 @@ public abstract class RegionServerCallable implements RetryingCallable { public void prepare(final boolean reload) throws IOException { this.location = connection.getRegionLocation(tableName, row, reload); if (this.location == null) { - throw new IOException("Failed to find location, tableName=" + Bytes.toString(tableName) + + throw new IOException("Failed to find location, tableName=" + tableName + ", row=" + Bytes.toString(row) + ", reload=" + reload); } setStub(getConnection().getClient(getLocation().getServerName())); @@ -100,7 +101,7 @@ public abstract class RegionServerCallable implements RetryingCallable { this.location = location; } - public byte [] getTableName() { + public TableName getTableName() { return this.tableName; } @@ -129,7 +130,7 @@ public abstract class RegionServerCallable implements RetryingCallable { @Override public String getExceptionMessageAdditionalDetail() { - return "row '" + Bytes.toString(row) + "' on table '" + Bytes.toString(tableName); + return "row '" + Bytes.toString(row) + "' on table '" + tableName; } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java index a48c571..95d8d1c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionLocation; /** @@ -46,7 +47,7 @@ interface Registry { * @param enabled Return true if table is enabled * @throws IOException */ - boolean isTableOnlineState(byte [] tableName, boolean enabled) throws IOException; + boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException; /** * @return Count of 'running' regionservers diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java index c701e37..1236982 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; @@ -89,7 +90,7 @@ public class ScannerCallable extends RegionServerCallable { * @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable * won't collect metrics */ - public ScannerCallable (HConnection connection, byte [] tableName, Scan scan, + public ScannerCallable (HConnection connection, TableName tableName, Scan scan, ScanMetrics scanMetrics) { super(connection, tableName, scan.getStartRow()); this.scan = scan; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java index 87c4f9b..d6fcb52 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/UnmodifyableHTableDescriptor.java @@ -40,7 +40,7 @@ public class UnmodifyableHTableDescriptor extends HTableDescriptor { * @param desc */ UnmodifyableHTableDescriptor(final HTableDescriptor desc) { - super(desc.getName(), getUnmodifyableFamilies(desc), desc.getValues()); + super(desc.getTableName(), getUnmodifyableFamilies(desc), desc.getValues()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java index 5f6f45c..3d765c6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java @@ -21,10 +21,10 @@ import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZKTableReadOnly; @@ -95,15 +95,14 @@ class ZooKeeperRegistry implements Registry { } @Override - public boolean isTableOnlineState(byte [] tableName, boolean enabled) + public boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException { - String tableNameStr = Bytes.toString(tableName); ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher(); try { if (enabled) { - return ZKTableReadOnly.isEnabledTable(zkw, tableNameStr); + return ZKTableReadOnly.isEnabledTable(zkw, tableName); } - return ZKTableReadOnly.isDisabledTable(zkw, tableNameStr); + return ZKTableReadOnly.isDisabledTable(zkw, tableName); } catch (KeeperException e) { throw new IOException("Enable/Disable failed", e); } finally { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java index f8459f4..385fe62 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -26,6 +26,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.HTable; @@ -99,7 +100,7 @@ public class AggregationClient { * & propagated to it. 
*/ public R max( - final byte[] tableName, final ColumnInterpreter ci, final Scan scan) + final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { HTable table = null; try { @@ -188,7 +189,7 @@ public class AggregationClient { * @throws Throwable */ public R min( - final byte[] tableName, final ColumnInterpreter ci, final Scan scan) + final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { HTable table = null; try { @@ -268,7 +269,7 @@ public class AggregationClient { * @throws Throwable */ public long rowCount( - final byte[] tableName, final ColumnInterpreter ci, final Scan scan) + final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { HTable table = null; try { @@ -342,7 +343,7 @@ public class AggregationClient { * @throws Throwable */ public S sum( - final byte[] tableName, final ColumnInterpreter ci, final Scan scan) + final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { HTable table = null; try { @@ -415,7 +416,7 @@ public class AggregationClient { * @throws Throwable */ private Pair getAvgArgs( - final byte[] tableName, final ColumnInterpreter ci, final Scan scan) + final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { HTable table = null; try { @@ -498,7 +499,7 @@ public class AggregationClient { * @throws Throwable */ public - double avg(final byte[] tableName, + double avg(final TableName tableName, final ColumnInterpreter ci, Scan scan) throws Throwable { Pair p = getAvgArgs(tableName, ci, scan); return ci.divideForAvg(p.getFirst(), p.getSecond()); @@ -606,7 +607,7 @@ public class AggregationClient { * @throws Throwable */ public - double std(final byte[] tableName, ColumnInterpreter ci, + double std(final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { HTable table = null; try { @@ -719,7 +720,7 @@ public class AggregationClient { * @throws Throwable */ public - R median(final byte[] tableName, ColumnInterpreter ci, + R median(final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { HTable table = null; try { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/SecureBulkLoadClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/SecureBulkLoadClient.java index 8abd49c..3882163 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/SecureBulkLoadClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/SecureBulkLoadClient.java @@ -20,10 +20,12 @@ package org.apache.hadoop.hbase.client.coprocessor; import com.google.protobuf.ByteString; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; import org.apache.hadoop.hbase.ipc.ServerRpcController; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos; import org.apache.hadoop.hbase.security.SecureBulkLoadUtil; @@ -45,7 +47,7 @@ public class SecureBulkLoadClient { this.table = table; } - public String prepareBulkLoad(final byte[] tableName) throws IOException { + public String prepareBulkLoad(final TableName tableName) throws IOException { try { return 
table.coprocessorService(SecureBulkLoadProtos.SecureBulkLoadService.class, @@ -61,7 +63,7 @@ public class SecureBulkLoadClient { SecureBulkLoadProtos.PrepareBulkLoadRequest request = SecureBulkLoadProtos.PrepareBulkLoadRequest.newBuilder() - .setTableName(com.google.protobuf.ByteString.copyFrom(tableName)).build(); + .setTableName(ProtobufUtil.toProtoTableName(tableName)).build(); instance.prepareBulkLoad(controller, request, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java index 0fd064f..645801f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java @@ -23,6 +23,7 @@ import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.RegionServerCallable; import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; @@ -48,13 +49,13 @@ public class RegionCoprocessorRpcChannel extends CoprocessorRpcChannel{ private static Log LOG = LogFactory.getLog(RegionCoprocessorRpcChannel.class); private final HConnection connection; - private final byte[] table; + private final TableName table; private final byte[] row; private byte[] lastRegion; private RpcRetryingCallerFactory rpcFactory; - public RegionCoprocessorRpcChannel(HConnection conn, byte[] table, byte[] row) { + public RegionCoprocessorRpcChannel(HConnection conn, TableName table, byte[] row) { this.connection = conn; this.table = table; this.row = row; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 3c6ab5d..9c8ec04 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -47,6 +48,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; @@ -1563,7 +1565,7 @@ public final class ProtobufUtil { * @return the converted Permission */ public static Permission toPermission(AccessControlProtos.Permission proto) { - if (proto.hasTable()) { + if (proto.hasTableName()) { return toTablePermission(proto); } else { List actions = toPermissionActions(proto.getActionList()); @@ -1582,9 +1584,9 @@ public final class ProtobufUtil { byte[] qualifier = null; byte[] family = null; - byte[] table = null; + TableName table = null; - if (proto.hasTable()) table = proto.getTable().toByteArray(); + if (proto.hasTableName()) table = 
ProtobufUtil.toTableName(proto.getTableName()); if (proto.hasFamily()) family = proto.getFamily().toByteArray(); if (proto.hasQualifier()) qualifier = proto.getQualifier().toByteArray(); @@ -1603,7 +1605,7 @@ public final class ProtobufUtil { if (perm instanceof TablePermission) { TablePermission tablePerm = (TablePermission)perm; if (tablePerm.hasTable()) { - builder.setTable(ByteString.copyFrom(tablePerm.getTable())); + builder.setTableName(ProtobufUtil.toProtoTableName(tablePerm.getTable())); } if (tablePerm.hasFamily()) { builder.setFamily(ByteString.copyFrom(tablePerm.getFamily())); @@ -1692,7 +1694,7 @@ public final class ProtobufUtil { permissionBuilder.addAction(toPermissionAction(a)); } if (perm.hasTable()) { - permissionBuilder.setTable(ByteString.copyFrom(perm.getTable())); + permissionBuilder.setTableName(ProtobufUtil.toProtoTableName(perm.getTable())); } if (perm.hasFamily()) { permissionBuilder.setFamily(ByteString.copyFrom(perm.getFamily())); @@ -1719,9 +1721,9 @@ public final class ProtobufUtil { byte[] qualifier = null; byte[] family = null; - byte[] table = null; + TableName table = null; - if (permission.hasTable()) table = permission.getTable().toByteArray(); + if (permission.hasTableName()) table = ProtobufUtil.toTableName(permission.getTableName()); if (permission.hasFamily()) family = permission.getFamily().toByteArray(); if (permission.hasQualifier()) qualifier = permission.getQualifier().toByteArray(); @@ -1762,14 +1764,14 @@ public final class ProtobufUtil { * * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to grant permissions - * @param t optional table name + * @param tableName optional table name * @param f optional column family * @param q optional qualifier * @param actions the permissions to be granted * @throws ServiceException */ public static void grant(AccessControlService.BlockingInterface protocol, - String userShortName, byte[] t, byte[] f, byte[] q, + String userShortName, TableName tableName, byte[] f, byte[] q, Permission.Action... actions) throws ServiceException { List permActions = Lists.newArrayListWithCapacity(actions.length); @@ -1777,7 +1779,7 @@ public final class ProtobufUtil { permActions.add(ProtobufUtil.toPermissionAction(a)); } AccessControlProtos.GrantRequest request = RequestConverter. - buildGrantRequest(userShortName, t, f, q, permActions.toArray( + buildGrantRequest(userShortName, tableName, f, q, permActions.toArray( new AccessControlProtos.Permission.Action[actions.length])); protocol.grant(null, request); } @@ -1791,14 +1793,14 @@ public final class ProtobufUtil { * * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to revoke permissions - * @param t optional table name + * @param tableName optional table name * @param f optional column family * @param q optional qualifier * @param actions the permissions to be revoked * @throws ServiceException */ public static void revoke(AccessControlService.BlockingInterface protocol, - String userShortName, byte[] t, byte[] f, byte[] q, + String userShortName, TableName tableName, byte[] f, byte[] q, Permission.Action... actions) throws ServiceException { List permActions = Lists.newArrayListWithCapacity(actions.length); @@ -1806,7 +1808,7 @@ public final class ProtobufUtil { permActions.add(ProtobufUtil.toPermissionAction(a)); } AccessControlProtos.RevokeRequest request = RequestConverter. 
- buildRevokeRequest(userShortName, t, f, q, permActions.toArray( + buildRevokeRequest(userShortName, tableName, f, q, permActions.toArray( new AccessControlProtos.Permission.Action[actions.length])); protocol.revoke(null, request); } @@ -1822,11 +1824,11 @@ public final class ProtobufUtil { */ public static List getUserPermissions( AccessControlService.BlockingInterface protocol, - byte[] t) throws ServiceException { + TableName t) throws ServiceException { AccessControlProtos.UserPermissionsRequest.Builder builder = AccessControlProtos.UserPermissionsRequest.newBuilder(); if (t != null) { - builder.setTable(ByteString.copyFrom(t)); + builder.setTableName(ProtobufUtil.toProtoTableName(t)); } AccessControlProtos.UserPermissionsRequest request = builder.build(); AccessControlProtos.UserPermissionsResponse response = @@ -1988,6 +1990,28 @@ public final class ProtobufUtil { cell.getValue().toByteArray()); } + public static HBaseProtos.NamespaceDescriptor toProtoNamespaceDescriptor(NamespaceDescriptor ns) { + HBaseProtos.NamespaceDescriptor.Builder b = + HBaseProtos.NamespaceDescriptor.newBuilder() + .setName(ByteString.copyFromUtf8(ns.getName())); + for(Map.Entry entry: ns.getConfiguration().entrySet()) { + b.addConfiguration(HBaseProtos.NameStringPair.newBuilder() + .setName(entry.getKey()) + .setValue(entry.getValue())); + } + return b.build(); + } + + public static NamespaceDescriptor toNamespaceDescriptor( + HBaseProtos.NamespaceDescriptor desc) throws IOException { + NamespaceDescriptor.Builder b = + NamespaceDescriptor.create(desc.getName().toStringUtf8()); + for(HBaseProtos.NameStringPair prop : desc.getConfigurationList()) { + b.addConfiguration(prop.getName(), prop.getValue()); + } + return b.build(); + } + /** * Get an instance of the argument type declared in a class's signature. 
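Editor's note: the two NamespaceDescriptor converters added just above pair with the TableName converters introduced later in ProtobufUtil. A hedged round-trip sketch, using only the calls shown in this patch (the namespace name and configuration key/value are invented, imports omitted):

  static void namespaceRoundTripExample() throws IOException {
    NamespaceDescriptor.Builder b = NamespaceDescriptor.create("demo_ns");
    b.addConfiguration("prop.key", "prop.value");   // arbitrary example setting
    NamespaceDescriptor ns = b.build();
    HBaseProtos.NamespaceDescriptor proto = ProtobufUtil.toProtoNamespaceDescriptor(ns);
    NamespaceDescriptor back = ProtobufUtil.toNamespaceDescriptor(proto);
    assert ns.getName().equals(back.getName());     // name survives the round trip
  }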
The * argument type is assumed to be a PB Message subclass, and the instance is @@ -2029,7 +2053,7 @@ public final class ProtobufUtil { // input / output paths are relative to the store dir // store dir is relative to region dir CompactionDescriptor.Builder builder = CompactionDescriptor.newBuilder() - .setTableName(ByteString.copyFrom(info.getTableName())) + .setTableName(ByteString.copyFrom(info.getTableName().getName())) .setEncodedRegionName(ByteString.copyFrom(info.getEncodedNameAsBytes())) .setFamilyName(ByteString.copyFrom(family)) .setStoreHomeDir(storeDir.getName()); //make relative @@ -2077,4 +2101,15 @@ public final class ProtobufUtil { return "row=" + Bytes.toString(proto.getRow().toByteArray()) + ", type=" + proto.getMutateType().toString(); } + + public static TableName toTableName(HBaseProtos.TableName tableNamePB) { + return TableName.valueOf(tableNamePB.getNamespace().toByteArray(), + tableNamePB.getQualifier().toByteArray()); + } + + public static HBaseProtos.TableName toProtoTableName(TableName tableName) { + return HBaseProtos.TableName.newBuilder() + .setNamespace(ByteString.copyFrom(tableName.getNamespace())) + .setQualifier(ByteString.copyFrom(tableName.getQualifier())).build(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 53e6ac8..50b90c3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -22,6 +22,7 @@ import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.CellScannable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -904,9 +905,9 @@ public final class RequestConverter { * @return an AddColumnRequest */ public static AddColumnRequest buildAddColumnRequest( - final byte [] tableName, final HColumnDescriptor column) { + final TableName tableName, final HColumnDescriptor column) { AddColumnRequest.Builder builder = AddColumnRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(tableName)); + builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setColumnFamilies(column.convert()); return builder.build(); } @@ -919,9 +920,9 @@ public final class RequestConverter { * @return a DeleteColumnRequest */ public static DeleteColumnRequest buildDeleteColumnRequest( - final byte [] tableName, final byte [] columnName) { + final TableName tableName, final byte [] columnName) { DeleteColumnRequest.Builder builder = DeleteColumnRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(tableName)); + builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setColumnName(ByteString.copyFrom(columnName)); return builder.build(); } @@ -934,9 +935,9 @@ public final class RequestConverter { * @return an ModifyColumnRequest */ public static ModifyColumnRequest buildModifyColumnRequest( - final byte [] tableName, final HColumnDescriptor column) { + final TableName tableName, final HColumnDescriptor column) { ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(tableName)); + builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setColumnFamilies(column.convert()); return 
builder.build(); } @@ -1019,9 +1020,9 @@ public final class RequestConverter { * @param tableName * @return a DeleteTableRequest */ - public static DeleteTableRequest buildDeleteTableRequest(final byte [] tableName) { + public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName) { DeleteTableRequest.Builder builder = DeleteTableRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(tableName)); + builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); return builder.build(); } @@ -1031,9 +1032,9 @@ public final class RequestConverter { * @param tableName * @return an EnableTableRequest */ - public static EnableTableRequest buildEnableTableRequest(final byte [] tableName) { + public static EnableTableRequest buildEnableTableRequest(final TableName tableName) { EnableTableRequest.Builder builder = EnableTableRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(tableName)); + builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); return builder.build(); } @@ -1043,9 +1044,9 @@ public final class RequestConverter { * @param tableName * @return a DisableTableRequest */ - public static DisableTableRequest buildDisableTableRequest(final byte [] tableName) { + public static DisableTableRequest buildDisableTableRequest(final TableName tableName) { DisableTableRequest.Builder builder = DisableTableRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(tableName)); + builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); return builder.build(); } @@ -1077,9 +1078,9 @@ public final class RequestConverter { * @return a ModifyTableRequest */ public static ModifyTableRequest buildModifyTableRequest( - final byte [] table, final HTableDescriptor hTableDesc) { + final TableName tableName, final HTableDescriptor hTableDesc) { ModifyTableRequest.Builder builder = ModifyTableRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(table)); + builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setTableSchema(hTableDesc.convert()); return builder.build(); } @@ -1091,9 +1092,9 @@ public final class RequestConverter { * @return a GetSchemaAlterStatusRequest */ public static GetSchemaAlterStatusRequest buildGetSchemaAlterStatusRequest( - final byte [] tableName) { + final TableName tableName) { GetSchemaAlterStatusRequest.Builder builder = GetSchemaAlterStatusRequest.newBuilder(); - builder.setTableName(ByteString.copyFrom(tableName)); + builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); return builder.build(); } @@ -1104,11 +1105,11 @@ public final class RequestConverter { * @return a GetTableDescriptorsRequest */ public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( - final List tableNames) { + final List tableNames) { GetTableDescriptorsRequest.Builder builder = GetTableDescriptorsRequest.newBuilder(); if (tableNames != null) { - for (String str : tableNames) { - builder.addTableNames(str); + for (TableName tableName : tableNames) { + builder.addTableNames(ProtobufUtil.toProtoTableName(tableName)); } } return builder.build(); @@ -1121,9 +1122,9 @@ public final class RequestConverter { * @return a GetTableDescriptorsRequest */ public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( - final byte[] tableName) { + final TableName tableName) { return GetTableDescriptorsRequest.newBuilder() - .addTableNames(Bytes.toString(tableName)) + .addTableNames(ProtobufUtil.toProtoTableName(tableName)) .build(); } @@ -1204,22 +1205,22 @@ public final class 
RequestConverter { * Create a request to grant user permissions. * * @param username the short user name who to grant permissions - * @param table optional table name the permissions apply + * @param tableName optional table name the permissions apply * @param family optional column family * @param qualifier optional qualifier * @param actions the permissions to be granted * @return A {@link AccessControlProtos} GrantRequest */ public static AccessControlProtos.GrantRequest buildGrantRequest( - String username, byte[] table, byte[] family, byte[] qualifier, + String username, TableName tableName, byte[] family, byte[] qualifier, AccessControlProtos.Permission.Action... actions) { AccessControlProtos.Permission.Builder permissionBuilder = AccessControlProtos.Permission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } - if (table != null) { - permissionBuilder.setTable(ByteString.copyFrom(table)); + if (tableName != null) { + permissionBuilder.setTableName(ProtobufUtil.toProtoTableName(tableName)); } if (family != null) { permissionBuilder.setFamily(ByteString.copyFrom(family)); @@ -1240,22 +1241,22 @@ public final class RequestConverter { * Create a request to revoke user permissions. * * @param username the short user name whose permissions to be revoked - * @param table optional table name the permissions apply + * @param tableName optional table name the permissions apply * @param family optional column family * @param qualifier optional qualifier * @param actions the permissions to be revoked * @return A {@link AccessControlProtos} RevokeRequest */ public static AccessControlProtos.RevokeRequest buildRevokeRequest( - String username, byte[] table, byte[] family, byte[] qualifier, + String username, TableName tableName, byte[] family, byte[] qualifier, AccessControlProtos.Permission.Action... 
actions) { AccessControlProtos.Permission.Builder permissionBuilder = AccessControlProtos.Permission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } - if (table != null) { - permissionBuilder.setTable(ByteString.copyFrom(table)); + if (tableName != null) { + permissionBuilder.setTableName(ProtobufUtil.toProtoTableName(tableName)); } if (family != null) { permissionBuilder.setFamily(ByteString.copyFrom(family)); @@ -1290,4 +1291,4 @@ public final class RequestConverter { } return builder.build(); } -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java index f00c54a..0fd2f87 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.security.access; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.util.Bytes; @@ -36,7 +37,7 @@ import java.io.IOException; public class TablePermission extends Permission { private static Log LOG = LogFactory.getLog(TablePermission.class); - private byte[] table; + private TableName table; private byte[] family; private byte[] qualifier; @@ -52,7 +53,7 @@ public class TablePermission extends Permission { * @param family the family, can be null if a global permission on the table * @param assigned the list of allowed actions */ - public TablePermission(byte[] table, byte[] family, Action... assigned) { + public TablePermission(TableName table, byte[] family, Action... assigned) { this(table, family, null, assigned); } @@ -63,7 +64,7 @@ public class TablePermission extends Permission { * @param family the family, can be null if a global permission on the table * @param assigned the list of allowed actions */ - public TablePermission(byte[] table, byte[] family, byte[] qualifier, + public TablePermission(TableName table, byte[] family, byte[] qualifier, Action... 
assigned) { super(assigned); this.table = table; @@ -78,7 +79,7 @@ public class TablePermission extends Permission { * @param family the family, can be null if a global permission on the table * @param actionCodes the list of allowed action codes */ - public TablePermission(byte[] table, byte[] family, byte[] qualifier, + public TablePermission(TableName table, byte[] family, byte[] qualifier, byte[] actionCodes) { super(actionCodes); this.table = table; @@ -90,7 +91,7 @@ public class TablePermission extends Permission { return table != null; } - public byte[] getTable() { + public TableName getTable() { return table; } @@ -123,9 +124,9 @@ public class TablePermission extends Permission { * @return true if the action within the given scope is allowed * by this permission, false */ - public boolean implies(byte[] table, byte[] family, byte[] qualifier, + public boolean implies(TableName table, byte[] family, byte[] qualifier, Action action) { - if (!Bytes.equals(this.table, table)) { + if (!this.table.equals(table)) { return false; } @@ -154,8 +155,8 @@ public class TablePermission extends Permission { * @return true if the action is allowed over the given scope * by this permission, otherwise false */ - public boolean implies(byte[] table, KeyValue kv, Action action) { - if (!Bytes.equals(this.table, table)) { + public boolean implies(TableName table, KeyValue kv, Action action) { + if (!this.table.equals(table)) { return false; } @@ -183,8 +184,8 @@ public class TablePermission extends Permission { * column-qualifier specific permission, for example, implies() would still * return false. */ - public boolean matchesFamily(byte[] table, byte[] family, Action action) { - if (!Bytes.equals(this.table, table)) { + public boolean matchesFamily(TableName table, byte[] family, Action action) { + if (!this.table.equals(table)) { return false; } @@ -208,7 +209,7 @@ public class TablePermission extends Permission { * @return true if the table, family and qualifier match, * otherwise false */ - public boolean matchesFamilyQualifier(byte[] table, byte[] family, byte[] qualifier, + public boolean matchesFamilyQualifier(TableName table, byte[] family, byte[] qualifier, Action action) { if (!matchesFamily(table, family, action)) { return false; @@ -229,7 +230,7 @@ public class TablePermission extends Permission { } TablePermission other = (TablePermission)obj; - if (!(Bytes.equals(table, other.getTable()) && + if (!(table.equals(other.getTable()) && ((family == null && other.getFamily() == null) || Bytes.equals(family, other.getFamily())) && ((qualifier == null && other.getQualifier() == null) || @@ -247,7 +248,7 @@ public class TablePermission extends Permission { final int prime = 37; int result = super.hashCode(); if (table != null) { - result = prime * result + Bytes.hashCode(table); + result = prime * result + table.hashCode(); } if (family != null) { result = prime * result + Bytes.hashCode(family); @@ -260,7 +261,7 @@ public class TablePermission extends Permission { public String toString() { StringBuilder str = new StringBuilder("[TablePermission: ") - .append("table=").append(Bytes.toString(table)) + .append("table=").append(table) .append(", family=").append(Bytes.toString(family)) .append(", qualifier=").append(Bytes.toString(qualifier)) .append(", actions="); @@ -282,7 +283,8 @@ public class TablePermission extends Permission { @Override public void readFields(DataInput in) throws IOException { super.readFields(in); - table = Bytes.readByteArray(in); + byte[] tableBytes = 
Bytes.readByteArray(in); + table = TableName.valueOf(tableBytes); if (in.readBoolean()) { family = Bytes.readByteArray(in); } @@ -294,7 +296,7 @@ public class TablePermission extends Permission { @Override public void write(DataOutput out) throws IOException { super.write(out); - Bytes.writeByteArray(out, table); + Bytes.writeByteArray(out, table.getName()); out.writeBoolean(family != null); if (family != null) { Bytes.writeByteArray(out, family); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java index fd5b755..6303ab9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.security.access; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; import java.io.DataInput; @@ -69,7 +70,7 @@ public class UserPermission extends TablePermission { * table * @param assigned the list of allowed actions */ - public UserPermission(byte[] user, byte[] table, byte[] family, + public UserPermission(byte[] user, TableName table, byte[] family, Action... assigned) { super(table, family, assigned); this.user = user; @@ -86,7 +87,7 @@ public class UserPermission extends TablePermission { * over the entire column family * @param assigned the list of allowed actions */ - public UserPermission(byte[] user, byte[] table, byte[] family, + public UserPermission(byte[] user, TableName table, byte[] family, byte[] qualifier, Action... assigned) { super(table, family, qualifier, assigned); this.user = user; @@ -103,7 +104,7 @@ public class UserPermission extends TablePermission { * over the entire column family * @param actionCodes the list of allowed action codes */ - public UserPermission(byte[] user, byte[] table, byte[] family, + public UserPermission(byte[] user, TableName table, byte[] family, byte[] qualifier, byte[] actionCodes) { super(table, family, qualifier, actionCodes); this.user = user; @@ -117,8 +118,8 @@ public class UserPermission extends TablePermission { * Returns true if this permission describes a global user permission. 
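Editor's note on the TablePermission changes above: getTable() now returns a TableName and implies() compares TableName instances. A small usage sketch with invented table, family, and qualifier values (imports omitted; the two-argument byte[] TableName.valueOf form is the one this patch uses in ProtobufUtil.toTableName):

  static void tablePermissionExample() {
    TableName tn = TableName.valueOf(Bytes.toBytes("demo_ns"), Bytes.toBytes("orders"));
    TablePermission perm =
        new TablePermission(tn, Bytes.toBytes("cf"), Permission.Action.READ);
    // Same table and family, and the permission has no qualifier restriction,
    // so READ is implied.
    boolean allowed = perm.implies(tn, Bytes.toBytes("cf"), Bytes.toBytes("col"),
        Permission.Action.READ);
    assert allowed;
  }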
*/ public boolean isGlobal() { - byte[] tableName = getTable(); - return(tableName == null || tableName.length == 0); + TableName tableName = getTable(); + return(tableName == null); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java index c42f38d..9da1c84 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java @@ -20,7 +20,9 @@ package org.apache.hadoop.hbase.snapshot; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.util.Bytes; @@ -38,15 +40,16 @@ public class ClientSnapshotDescriptionUtils { */ public static void assertSnapshotRequestIsValid(HBaseProtos.SnapshotDescription snapshot) throws IllegalArgumentException { - // FIXME these method names is really bad - trunk will probably change - // .META. and -ROOT- snapshots are not allowed - if (HTableDescriptor.isMetaTable(Bytes.toBytes(snapshot.getTable()))) { - throw new IllegalArgumentException(".META. and -ROOT- snapshots are not allowed"); - } // make sure the snapshot name is valid - HTableDescriptor.isLegalTableName(Bytes.toBytes(snapshot.getName())); - // make sure the table name is valid - HTableDescriptor.isLegalTableName(Bytes.toBytes(snapshot.getTable())); + TableName.isLegalTableQualifierName(Bytes.toBytes(snapshot.getName())); + if(snapshot.hasTable()) { + // make sure the table name is valid, this will implicitly check validity + TableName tableName = TableName.valueOf(snapshot.getTable()); + + if (HTableDescriptor.isSystemTable(tableName)) { + throw new IllegalArgumentException("System table snapshots are not allowed"); + } + } } /** @@ -60,7 +63,8 @@ public class ClientSnapshotDescriptionUtils { if (ssd == null) { return null; } - return "{ ss=" + ssd.getName() + " table=" + ssd.getTable() - + " type=" + ssd.getType() + " }"; + return "{ ss=" + ssd.getName() + + " table=" + (ssd.hasTable()?TableName.valueOf(ssd.getTable()):"") + + " type=" + ssd.getType() + " }"; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java index 51882d6..bed248b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/TablePartiallyOpenException.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.snapshot; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; import java.io.IOException; @@ -45,7 +46,14 @@ public class TablePartiallyOpenException extends IOException { /** * @param tableName Name of table that is partial open */ - public TablePartiallyOpenException(byte[] tableName) { - this(Bytes.toString(tableName)); + public TablePartiallyOpenException(TableName tableName) { + this(tableName.getNameAsString()); } + + /** + * @param tableName Name of table that is partial 
open + */ + public TablePartiallyOpenException(byte[] tableName) { + this(Bytes.toString(tableName)); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java index 1fb8eba..ecc5a0f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTable.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.zookeeper; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.zookeeper.KeeperException; @@ -56,8 +57,8 @@ public class ZKTable { * for every query. Synchronize access rather than use concurrent Map because * synchronization needs to span query of zk. */ - private final Map cache = - new HashMap(); + private final Map cache = + new HashMap(); // TODO: Make it so always a table znode. Put table schema here as well as table state. // Have watcher on table znode so all are notified of state or schema change. @@ -78,8 +79,9 @@ public class ZKTable { List children = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode); if (children == null) return; for (String child: children) { - ZooKeeperProtos.Table.State state = ZKTableReadOnly.getTableState(this.watcher, child); - if (state != null) this.cache.put(child, state); + TableName tableName = TableName.valueOf(child); + ZooKeeperProtos.Table.State state = ZKTableReadOnly.getTableState(this.watcher, tableName); + if (state != null) this.cache.put(tableName, state); } } } @@ -90,7 +92,7 @@ public class ZKTable { * @param tableName * @throws KeeperException unexpected zookeeper exception */ - public void setDisabledTable(String tableName) + public void setDisabledTable(TableName tableName) throws KeeperException { synchronized (this.cache) { if (!isDisablingOrDisabledTable(tableName)) { @@ -107,7 +109,7 @@ public class ZKTable { * @param tableName * @throws KeeperException unexpected zookeeper exception */ - public void setDisablingTable(final String tableName) + public void setDisablingTable(final TableName tableName) throws KeeperException { synchronized (this.cache) { if (!isEnabledOrDisablingTable(tableName)) { @@ -124,7 +126,7 @@ public class ZKTable { * @param tableName * @throws KeeperException unexpected zookeeper exception */ - public void setEnablingTable(final String tableName) + public void setEnablingTable(final TableName tableName) throws KeeperException { synchronized (this.cache) { if (!isDisabledOrEnablingTable(tableName)) { @@ -142,7 +144,7 @@ public class ZKTable { * @return if the operation succeeds or not * @throws KeeperException unexpected zookeeper exception */ - public boolean checkAndSetEnablingTable(final String tableName) + public boolean checkAndSetEnablingTable(final TableName tableName) throws KeeperException { synchronized (this.cache) { if (isEnablingTable(tableName)) { @@ -160,7 +162,7 @@ public class ZKTable { * @return if the operation succeeds or not * @throws KeeperException unexpected zookeeper exception */ - public boolean checkDisabledAndSetEnablingTable(final String tableName) + public boolean checkDisabledAndSetEnablingTable(final TableName tableName) throws KeeperException { synchronized (this.cache) { if 
(!isDisabledTable(tableName)) { @@ -178,7 +180,7 @@ public class ZKTable { * @return if the operation succeeds or not * @throws KeeperException unexpected zookeeper exception */ - public boolean checkEnabledAndSetDisablingTable(final String tableName) + public boolean checkEnabledAndSetDisablingTable(final TableName tableName) throws KeeperException { synchronized (this.cache) { if (this.cache.get(tableName) != null && !isEnabledTable(tableName)) { @@ -189,9 +191,9 @@ public class ZKTable { } } - private void setTableState(final String tableName, final ZooKeeperProtos.Table.State state) + private void setTableState(final TableName tableName, final ZooKeeperProtos.Table.State state) throws KeeperException { - String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName); + String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()); if (ZKUtil.checkExists(this.watcher, znode) == -1) { ZKUtil.createAndFailSilent(this.watcher, znode); } @@ -204,41 +206,41 @@ public class ZKTable { } } - public boolean isDisabledTable(final String tableName) { + public boolean isDisabledTable(final TableName tableName) { return isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED); } - public boolean isDisablingTable(final String tableName) { + public boolean isDisablingTable(final TableName tableName) { return isTableState(tableName, ZooKeeperProtos.Table.State.DISABLING); } - public boolean isEnablingTable(final String tableName) { + public boolean isEnablingTable(final TableName tableName) { return isTableState(tableName, ZooKeeperProtos.Table.State.ENABLING); } - public boolean isEnabledTable(String tableName) { + public boolean isEnabledTable(TableName tableName) { return isTableState(tableName, ZooKeeperProtos.Table.State.ENABLED); } - public boolean isDisablingOrDisabledTable(final String tableName) { + public boolean isDisablingOrDisabledTable(final TableName tableName) { synchronized (this.cache) { return isDisablingTable(tableName) || isDisabledTable(tableName); } } - public boolean isEnabledOrDisablingTable(final String tableName) { + public boolean isEnabledOrDisablingTable(final TableName tableName) { synchronized (this.cache) { return isEnabledTable(tableName) || isDisablingTable(tableName); } } - public boolean isDisabledOrEnablingTable(final String tableName) { + public boolean isDisabledOrEnablingTable(final TableName tableName) { synchronized (this.cache) { return isDisabledTable(tableName) || isEnablingTable(tableName); } } - private boolean isTableState(final String tableName, final ZooKeeperProtos.Table.State state) { + private boolean isTableState(final TableName tableName, final ZooKeeperProtos.Table.State state) { synchronized (this.cache) { ZooKeeperProtos.Table.State currentState = this.cache.get(tableName); return ZKTableReadOnly.isTableState(currentState, state); @@ -251,7 +253,7 @@ public class ZKTable { * @param tableName * @throws KeeperException unexpected zookeeper exception */ - public void setDeletedTable(final String tableName) + public void setDeletedTable(final TableName tableName) throws KeeperException { synchronized (this.cache) { if (this.cache.remove(tableName) == null) { @@ -259,7 +261,7 @@ public class ZKTable { "already deleted"); } ZKUtil.deleteNodeFailSilent(this.watcher, - ZKUtil.joinZNode(this.watcher.tableZNode, tableName)); + ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString())); } } @@ -270,7 +272,7 @@ public class ZKTable { * @param tableName * @throws KeeperException */ - public void 
setEnabledTable(final String tableName) throws KeeperException { + public void setEnabledTable(final TableName tableName) throws KeeperException { setTableState(tableName, ZooKeeperProtos.Table.State.ENABLED); } @@ -280,7 +282,7 @@ public class ZKTable { * @param tableName * @return true if the table is present */ - public boolean isTablePresent(final String tableName) { + public boolean isTablePresent(final TableName tableName) { synchronized (this.cache) { ZooKeeperProtos.Table.State state = this.cache.get(tableName); return !(state == null); @@ -291,11 +293,11 @@ public class ZKTable { * Gets a list of all the tables set as disabled in zookeeper. * @return Set of disabled tables, empty Set if none */ - public Set getDisabledTables() { - Set disabledTables = new HashSet(); + public Set getDisabledTables() { + Set disabledTables = new HashSet(); synchronized (this.cache) { - Set tables = this.cache.keySet(); - for (String table: tables) { + Set tables = this.cache.keySet(); + for (TableName table: tables) { if (isDisabledTable(table)) disabledTables.add(table); } } @@ -307,7 +309,7 @@ public class ZKTable { * @return Set of disabled tables, empty Set if none * @throws KeeperException */ - public static Set getDisabledTables(ZooKeeperWatcher zkw) + public static Set getDisabledTables(ZooKeeperWatcher zkw) throws KeeperException { return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLED); } @@ -317,7 +319,7 @@ public class ZKTable { * @return Set of disabling tables, empty Set if none * @throws KeeperException */ - public static Set getDisablingTables(ZooKeeperWatcher zkw) + public static Set getDisablingTables(ZooKeeperWatcher zkw) throws KeeperException { return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLING); } @@ -327,7 +329,7 @@ public class ZKTable { * @return Set of enabling tables, empty Set if none * @throws KeeperException */ - public static Set getEnablingTables(ZooKeeperWatcher zkw) + public static Set getEnablingTables(ZooKeeperWatcher zkw) throws KeeperException { return getAllTables(zkw, ZooKeeperProtos.Table.State.ENABLING); } @@ -337,7 +339,7 @@ public class ZKTable { * @return Set of disabled tables, empty Set if none * @throws KeeperException */ - public static Set getDisabledOrDisablingTables(ZooKeeperWatcher zkw) + public static Set getDisabledOrDisablingTables(ZooKeeperWatcher zkw) throws KeeperException { return getAllTables(zkw, ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING); @@ -352,14 +354,14 @@ public class ZKTable { * @param deleteZNode * @throws KeeperException */ - public void removeEnablingTable(final String tableName, boolean deleteZNode) + public void removeEnablingTable(final TableName tableName, boolean deleteZNode) throws KeeperException { synchronized (this.cache) { if (isEnablingTable(tableName)) { this.cache.remove(tableName); if (deleteZNode) { ZKUtil.deleteNodeFailSilent(this.watcher, - ZKUtil.joinZNode(this.watcher.tableZNode, tableName)); + ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString())); } } } @@ -371,17 +373,18 @@ public class ZKTable { * @return Set of tables of specified states, empty Set if none * @throws KeeperException */ - static Set getAllTables(final ZooKeeperWatcher zkw, + static Set getAllTables(final ZooKeeperWatcher zkw, final ZooKeeperProtos.Table.State... 
states) throws KeeperException { - Set allTables = new HashSet(); + Set allTables = new HashSet(); List children = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); if(children == null) return allTables; for (String child: children) { - ZooKeeperProtos.Table.State state = ZKTableReadOnly.getTableState(zkw, child); + TableName tableName = TableName.valueOf(child); + ZooKeeperProtos.Table.State state = ZKTableReadOnly.getTableState(zkw, tableName); for (ZooKeeperProtos.Table.State expectedState: states) { if (state == expectedState) { - allTables.add(child); + allTables.add(tableName); break; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableReadOnly.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableReadOnly.java index d1d35ef..5cf62f2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableReadOnly.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableReadOnly.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.zookeeper; import com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; @@ -50,7 +51,7 @@ public class ZKTableReadOnly { * @throws KeeperException */ public static boolean isDisabledTable(final ZooKeeperWatcher zkw, - final String tableName) + final TableName tableName) throws KeeperException { ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); return isTableState(ZooKeeperProtos.Table.State.DISABLED, state); @@ -66,7 +67,7 @@ public class ZKTableReadOnly { * @throws KeeperException */ public static boolean isEnabledTable(final ZooKeeperWatcher zkw, - final String tableName) + final TableName tableName) throws KeeperException { return getTableState(zkw, tableName) == ZooKeeperProtos.Table.State.ENABLED; } @@ -82,7 +83,7 @@ public class ZKTableReadOnly { * @throws KeeperException */ public static boolean isDisablingOrDisabledTable(final ZooKeeperWatcher zkw, - final String tableName) + final TableName tableName) throws KeeperException { ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); return isTableState(ZooKeeperProtos.Table.State.DISABLING, state) || @@ -94,14 +95,16 @@ public class ZKTableReadOnly { * @return Set of disabled tables, empty Set if none * @throws KeeperException */ - public static Set getDisabledTables(ZooKeeperWatcher zkw) + public static Set getDisabledTables(ZooKeeperWatcher zkw) throws KeeperException { - Set disabledTables = new HashSet(); + Set disabledTables = new HashSet(); List children = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); for (String child: children) { - ZooKeeperProtos.Table.State state = getTableState(zkw, child); - if (state == ZooKeeperProtos.Table.State.DISABLED) disabledTables.add(child); + TableName tableName = + TableName.valueOf(child); + ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); + if (state == ZooKeeperProtos.Table.State.DISABLED) disabledTables.add(tableName); } return disabledTables; } @@ -111,16 +114,18 @@ public class ZKTableReadOnly { * @return Set of disabled tables, empty Set if none * @throws KeeperException */ - public static Set getDisabledOrDisablingTables(ZooKeeperWatcher zkw) + public static Set getDisabledOrDisablingTables(ZooKeeperWatcher zkw) throws KeeperException { - Set disabledTables = new HashSet(); + Set disabledTables 
= new HashSet(); List children = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode); for (String child: children) { - ZooKeeperProtos.Table.State state = getTableState(zkw, child); + TableName tableName = + TableName.valueOf(child); + ZooKeeperProtos.Table.State state = getTableState(zkw, tableName); if (state == ZooKeeperProtos.Table.State.DISABLED || state == ZooKeeperProtos.Table.State.DISABLING) - disabledTables.add(child); + disabledTables.add(tableName); } return disabledTables; } @@ -132,14 +137,14 @@ public class ZKTableReadOnly { /** * @param zkw - * @param child + * @param tableName * @return Null or {@link ZooKeeperProtos.Table.State} found in znode. * @throws KeeperException */ static ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw, - final String child) + final TableName tableName) throws KeeperException { - String znode = ZKUtil.joinZNode(zkw.tableZNode, child); + String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString()); byte [] data = ZKUtil.getData(zkw, znode); if (data == null || data.length <= 0) return null; try { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java index 4309497..6cdd6da 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -109,6 +109,9 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { public String tableLockZNode; // znode containing the state of recovering regions public String recoveringRegionsZNode; + // znode containing namespace descriptors + public static String namespaceZNode = "namespace"; + // Certain ZooKeeper nodes need to be world-readable public static final ArrayList CREATOR_ALL_AND_WORLD_READABLE = @@ -231,7 +234,9 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { tableLockZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.tableLock", "table-lock")); recoveringRegionsZNode = ZKUtil.joinZNode(baseZNode, - conf.get("zookeeper.znode.recovering.regions", "recovering-regions")); + conf.get("zookeeper.znode.recovering.regions", "recovering-regions")); + namespaceZNode = ZKUtil.joinZNode(baseZNode, + conf.get("zookeeper.znode.namespace", "namespace")); } /** diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java index bdd6157..215a2b1 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; @@ -47,16 +48,17 @@ import java.util.concurrent.atomic.AtomicInteger; @Category(MediumTests.class) public class TestAsyncProcess { - private static final byte[] DUMMY_TABLE = "DUMMY_TABLE".getBytes(); + private static final TableName DUMMY_TABLE = + TableName.valueOf("DUMMY_TABLE"); private static final byte[] DUMMY_BYTES_1 = "DUMMY_BYTES_1".getBytes(); private static final byte[] DUMMY_BYTES_2 = "DUMMY_BYTES_2".getBytes(); private static final byte[] FAILS = "FAILS".getBytes(); private 
static final Configuration conf = new Configuration(); - private static ServerName sn = new ServerName("localhost:10,1254"); - private static HRegionInfo hri1 = new HRegionInfo(DUMMY_BYTES_1); - private static HRegionInfo hri2 = new HRegionInfo(DUMMY_BYTES_1); + private static HRegionInfo hri1 = new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2); + private static HRegionInfo hri2 = + new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_2, HConstants.EMPTY_END_ROW); private static HRegionLocation loc1 = new HRegionLocation(hri1, sn); private static HRegionLocation loc2 = new HRegionLocation(hri2, sn); @@ -118,7 +120,8 @@ public class TestAsyncProcess { } @Override - protected AsyncProcess createAsyncProcess(byte[] tableName, ExecutorService pool, + protected AsyncProcess createAsyncProcess(TableName tableName, + ExecutorService pool, AsyncProcess.AsyncProcessCallback callback, Configuration conf) { ap = new MyAsyncProcess(this, callback, conf); @@ -126,7 +129,7 @@ public class TestAsyncProcess { } @Override - public HRegionLocation locateRegion(final byte[] tableName, + public HRegionLocation locateRegion(final TableName tableName, final byte[] row) { return loc1; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index da48d55..60fb8c8 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -25,6 +25,7 @@ import java.net.SocketTimeoutException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -81,7 +82,7 @@ public class TestClientNoCluster { } @Override - public boolean isTableOnlineState(byte[] tableName, boolean enabled) + public boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException { return enabled; } @@ -102,7 +103,7 @@ public class TestClientNoCluster { Configuration localConfig = HBaseConfiguration.create(this.conf); // This override mocks up our exists/get call to throw a RegionServerStoppedException. localConfig.set("hbase.client.connection.impl", RpcTimeoutConnection.class.getName()); - HTable table = new HTable(localConfig, HConstants.META_TABLE_NAME); + HTable table = new HTable(localConfig, TableName.META_TABLE_NAME); Throwable t = null; LOG.info("Start"); try { @@ -139,7 +140,7 @@ public class TestClientNoCluster { // and it has expired. Otherwise, if this functionality is broke, all retries will be run -- // all ten of them -- and we'll get the RetriesExhaustedException exception. localConfig.setInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, pause - 1); - HTable table = new HTable(localConfig, HConstants.META_TABLE_NAME); + HTable table = new HTable(localConfig, TableName.META_TABLE_NAME); Throwable t = null; try { // An exists call turns into a get w/ a flag. @@ -171,7 +172,7 @@ public class TestClientNoCluster { // Go against meta else we will try to find first region for the table on construction which // means we'll have to do a bunch more mocking. Tests that go against meta only should be // good for a bit of testing. 
- HTable table = new HTable(this.conf, HConstants.META_TABLE_NAME); + HTable table = new HTable(this.conf, TableName.META_TABLE_NAME); ResultScanner scanner = table.getScanner(HConstants.CATALOG_FAMILY); try { Result result = null; @@ -191,7 +192,7 @@ public class TestClientNoCluster { // Go against meta else we will try to find first region for the table on construction which // means we'll have to do a bunch more mocking. Tests that go against meta only should be // good for a bit of testing. - HTable table = new HTable(this.conf, HConstants.META_TABLE_NAME); + HTable table = new HTable(this.conf, TableName.META_TABLE_NAME); ResultScanner scanner = table.getScanner(HConstants.CATALOG_FAMILY); try { Result result = null; @@ -294,4 +295,4 @@ public class TestClientNoCluster { return this.stub; } } -} \ No newline at end of file +} diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java index 5b10edb..b9f6669 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java @@ -22,12 +22,16 @@ import static org.junit.Assert.fail; import java.io.IOException; +import com.google.protobuf.ByteString; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse; @@ -100,7 +104,7 @@ public class TestSnapshotFromAdmin { // setup the admin and run the test HBaseAdmin admin = new HBaseAdmin(mockConnection); String snapshot = "snapshot"; - String table = "table"; + TableName table = TableName.valueOf("table"); // get start time long start = System.currentTimeMillis(); admin.snapshot(snapshot, table); @@ -128,6 +132,7 @@ public class TestSnapshotFromAdmin { failSnapshotStart(admin, builder.setName("-snapshot").build()); failSnapshotStart(admin, builder.setName("snapshot fails").build()); failSnapshotStart(admin, builder.setName("snap$hot").build()); + failSnapshotStart(admin, builder.setName("snap:hot").build()); // check the table name also get verified failSnapshotStart(admin, builder.setName("snapshot").setTable(".table").build()); failSnapshotStart(admin, builder.setName("snapshot").setTable("-table").build()); @@ -144,7 +149,7 @@ public class TestSnapshotFromAdmin { IsSnapshotDoneResponse doneResponse = IsSnapshotDoneResponse.newBuilder().setDone(true).build(); Mockito.when( master.isSnapshotDone((RpcController) Mockito.isNull(), - Mockito.any(IsSnapshotDoneRequest.class))).thenReturn(doneResponse); + Mockito.any(IsSnapshotDoneRequest.class))).thenReturn(doneResponse); // make sure that we can use valid names admin.snapshot(builder.setName("snapshot").setTable("table").build()); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
index 9d75fe7..3ec7435 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -38,9 +38,11 @@ import org.apache.hadoop.hbase.util.Bytes; @InterfaceAudience.Public @InterfaceStability.Stable public final class HConstants { + //Bytes.UTF8_ENCODING should be updated if this changed /** When we encode strings, we always specify UTF8 encoding */ public static final String UTF8_ENCODING = "UTF-8"; + //Bytes.UTF8_CHARSET should be updated if this changed /** When we encode strings, we always specify UTF8 encoding */ public static final Charset UTF8_CHARSET = Charset.forName(UTF8_ENCODING); /** @@ -104,9 +106,10 @@ public final class HConstants { * Version 5 changes versions in catalog table regions. * Version 6 enables blockcaching on catalog tables. * Version 7 introduces hfile -- hbase 0.19 to 0.20.. + * Version 8 introduces namespace */ // public static final String FILE_SYSTEM_VERSION = "6"; - public static final String FILE_SYSTEM_VERSION = "7"; + public static final String FILE_SYSTEM_VERSION = "8"; // Configuration parameters @@ -349,11 +352,7 @@ public final class HConstants { // be the first to be reassigned if the server(s) they are being served by // should go down. - /** The root table's name.*/ - public static final byte [] ROOT_TABLE_NAME = Bytes.toBytes("-ROOT-"); - - /** The META table's name. */ - public static final byte [] META_TABLE_NAME = Bytes.toBytes(".META."); + public static final String BASE_NAMESPACE_DIR = ".data"; /** delimiter used between portions of a region name */ public static final int META_ROW_DELIMITER = ','; @@ -826,12 +825,12 @@ public final class HConstants { Collections.unmodifiableList(Arrays.asList(new String[] { HREGION_LOGDIR_NAME, HREGION_OLDLOGDIR_NAME, CORRUPT_DIR_NAME, SPLIT_LOGDIR_NAME, HBCK_SIDELINEDIR_NAME, HFILE_ARCHIVE_DIRECTORY, SNAPSHOT_DIR_NAME, HBASE_TEMP_DIRECTORY, - OLD_SNAPSHOT_DIR_NAME })); + OLD_SNAPSHOT_DIR_NAME, BASE_NAMESPACE_DIR})); /** Directories that are not HBase user table directories */ public static final List HBASE_NON_USER_TABLE_DIRS = Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll( - new String[] { Bytes.toString(META_TABLE_NAME), Bytes.toString(ROOT_TABLE_NAME) }, + new String[] { TableName.META_TABLE_NAME.getNameAsString(), TableName.ROOT_TABLE_NAME.getNameAsString() }, HBASE_NON_TABLE_DIRS.toArray()))); /** Health script related settings. */ diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 2b918fd..bbfe9a6 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -68,6 +68,10 @@ import com.google.common.primitives.Longs; @InterfaceStability.Evolving public class KeyValue implements Cell, HeapSize, Cloneable { static final Log LOG = LogFactory.getLog(KeyValue.class); + + private static final int META_LENGTH = + TableName.META_TABLE_NAME.getName().length; // 'hbase.meta' length + // TODO: Group Key-only comparators and operations into a Key class, just // for neatness sake, if can figure what to call it. @@ -123,11 +127,11 @@ public class KeyValue implements Cell, HeapSize, Cloneable { * @param tableName The table name. * @return The comparator. 
*/ - public static KeyComparator getRowComparator(byte [] tableName) { - if(Bytes.equals(HConstants.ROOT_TABLE_NAME,tableName)) { + public static KeyComparator getRowComparator(TableName tableName) { + if(TableName.ROOT_TABLE_NAME.equals(tableName)) { return ROOT_COMPARATOR.getRawComparator(); } - if(Bytes.equals(HConstants.META_TABLE_NAME, tableName)) { + if(TableName.META_TABLE_NAME.equals(tableName)) { return META_COMPARATOR.getRawComparator(); } return COMPARATOR.getRawComparator(); @@ -2399,14 +2403,13 @@ public class KeyValue implements Cell, HeapSize, Cloneable { // Rows look like this: .META.,ROW_FROM_META,RID // LOG.info("ROOT " + Bytes.toString(left, loffset, llength) + // "---" + Bytes.toString(right, roffset, rlength)); - final int metalength = 7; // '.META.' length - int lmetaOffsetPlusDelimiter = loffset + metalength; + int lmetaOffsetPlusDelimiter = loffset + META_LENGTH + 1; int leftFarDelimiter = getDelimiterInReverse(left, lmetaOffsetPlusDelimiter, - llength - metalength, HConstants.DELIMITER); - int rmetaOffsetPlusDelimiter = roffset + metalength; + llength - META_LENGTH - 1, HConstants.DELIMITER); + int rmetaOffsetPlusDelimiter = roffset + META_LENGTH + 1; int rightFarDelimiter = getDelimiterInReverse(right, - rmetaOffsetPlusDelimiter, rlength - metalength, + rmetaOffsetPlusDelimiter, rlength - META_LENGTH - 1, HConstants.DELIMITER); if (leftFarDelimiter < 0 && rightFarDelimiter >= 0) { // Nothing between .META. and regionid. Its first key. diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java new file mode 100644 index 0000000..5ccb49e --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java @@ -0,0 +1,202 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Bytes; + +import java.util.Collections; +import java.util.Comparator; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; + +/** + * Namespace POJO class. Used to represent and define namespaces. + * + * Descriptors will be persisted in an hbase table. + * This works since namespaces are essentially metadata of a group of tables + * as opposed to a more tangible container. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class NamespaceDescriptor { + + /** System namespace name. 
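Comparator selection in KeyValue now hinges on TableName equality rather than Bytes.equals against the removed HConstants byte arrays; a short illustration of the updated call:

    // Row-comparator lookup after this patch: pass the TableName, not raw table bytes.
    KeyValue.KeyComparator rootCmp = KeyValue.getRowComparator(TableName.ROOT_TABLE_NAME);
    KeyValue.KeyComparator metaCmp = KeyValue.getRowComparator(TableName.META_TABLE_NAME);
    KeyValue.KeyComparator userCmp = KeyValue.getRowComparator(TableName.valueOf("default", "usertable"));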
*/ + public static final byte [] SYSTEM_NAMESPACE_NAME = Bytes.toBytes("hbase"); + public static final String SYSTEM_NAMESPACE_NAME_STR = + Bytes.toString(SYSTEM_NAMESPACE_NAME); + /** Default namespace name. */ + public static final byte [] DEFAULT_NAMESPACE_NAME = Bytes.toBytes("default"); + public static final String DEFAULT_NAMESPACE_NAME_STR = + Bytes.toString(DEFAULT_NAMESPACE_NAME); + + public static final NamespaceDescriptor DEFAULT_NAMESPACE = NamespaceDescriptor.create( + DEFAULT_NAMESPACE_NAME_STR).build(); + public static final NamespaceDescriptor SYSTEM_NAMESPACE = NamespaceDescriptor.create( + SYSTEM_NAMESPACE_NAME_STR).build(); + + public final static Set RESERVED_NAMESPACES; + static { + Set set = new HashSet(); + set.add(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR); + set.add(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); + RESERVED_NAMESPACES = Collections.unmodifiableSet(set); + } + public final static Set RESERVED_NAMESPACES_BYTES; + static { + Set set = new TreeSet(Bytes.BYTES_RAWCOMPARATOR); + for(String name: RESERVED_NAMESPACES) { + set.add(Bytes.toBytes(name)); + } + RESERVED_NAMESPACES_BYTES = Collections.unmodifiableSet(set); + } + + private String name; + private Map configuration; + + public static final Comparator NAMESPACE_DESCRIPTOR_COMPARATOR = + new Comparator() { + @Override + public int compare(NamespaceDescriptor namespaceDescriptor, + NamespaceDescriptor namespaceDescriptor2) { + return namespaceDescriptor.getName().compareTo(namespaceDescriptor2.getName()); + } + }; + + private NamespaceDescriptor() { + } + + private NamespaceDescriptor(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + /** + * Getter for accessing the configuration value by key + */ + public String getConfigurationValue(String key) { + return configuration.get(key); + } + + /** + * Getter for fetching an unmodifiable {@link #configuration} map. + */ + public Map getConfiguration() { + // shallow pointer copy + return Collections.unmodifiableMap(configuration); + } + + /** + * Setter for storing a configuration setting in {@link #configuration} map. + * @param key Config key. Same as XML config key e.g. hbase.something.or.other. + * @param value String value. If null, removes the setting. 
+ */ + public void setConfiguration(String key, String value) { + if (value == null) { + removeConfiguration(key); + } else { + configuration.put(key, value); + } + } + + /** + * Remove a config setting represented by the key from the {@link #configuration} map + */ + public void removeConfiguration(final String key) { + configuration.remove(key); + } + + @Override + public String toString() { + StringBuilder s = new StringBuilder(); + s.append('{'); + s.append(HConstants.NAME); + s.append(" => '"); + s.append(name); + s.append("'"); + for (Map.Entry e : configuration.entrySet()) { + String key = e.getKey(); + String value = e.getValue(); + if (key == null) { + continue; + } + s.append(", "); + s.append(key); + s.append(" => '"); + s.append(value); + s.append("'"); + } + s.append('}'); + return s.toString(); + } + + public static Builder create(String name) { + return new Builder(name); + } + + public static Builder create(NamespaceDescriptor ns) { + return new Builder(ns); + } + + public static class Builder { + private String bName; + private Map bConfiguration = new TreeMap(); + + private Builder(NamespaceDescriptor ns) { + this.bName = ns.name; + this.bConfiguration = ns.configuration; + } + + private Builder(String name) { + this.bName = name; + } + + public Builder addConfiguration(Map configuration) { + this.bConfiguration.putAll(configuration); + return this; + } + + public Builder addConfiguration(String key, String value) { + this.bConfiguration.put(key, value); + return this; + } + + public Builder removeConfiguration(String key) { + this.bConfiguration.remove(key); + return this; + } + + public NamespaceDescriptor build() { + if (this.bName == null){ + throw new IllegalArgumentException("A name has to be specified in a namespace."); + } + + NamespaceDescriptor desc = new NamespaceDescriptor(this.bName); + desc.configuration = this.bConfiguration; + return desc; + } + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java new file mode 100644 index 0000000..10cb310 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java @@ -0,0 +1,295 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * Immutable POJO class for representing a table name. + * Which is of the form: + * <table namespace>:<table qualifier> + * + * Two special namespaces: + * + * 1. hbase - system namespace, used to contain hbase internal tables + * 2. 
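Since the NamespaceDescriptor constructors are private, instances are always assembled through the Builder shown above. A small usage sketch; the namespace name and configuration key/value pair are purely illustrative:

    // Build a descriptor for a hypothetical "analytics" namespace.
    NamespaceDescriptor ns = NamespaceDescriptor.create("analytics")
        .addConfiguration("some.site.specific.key", "value")  // illustrative key, not defined by this patch
        .build();                                              // build() rejects a null name
    String name = ns.getName();                                // "analytics"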
default - tables with no explicit specified namespace will + * automatically fall into this namespace. + * + * ie + * + * a) foo:bar, means namespace=foo and qualifier=bar + * b) bar, means namespace=default and qualifier=bar + * c) default:bar, means namespace=default and qualifier=bar + * + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class TableName implements Comparable { + + /** Namespace delimiter */ + //this should always be only 1 byte long + public final static char NAMESPACE_DELIM = ':'; + + // A non-capture group so that this can be embedded. + // regex is a bit more complicated to support nuance of tables + // in default namespace + //Allows only letters, digits and '_' + public static final String VALID_NAMESPACE_REGEX = + "(?:[a-zA-Z_0-9]+)"; + //Allows only letters, digits, '_', '-' and '.' + public static final String VALID_TABLE_QUALIFIER_REGEX = + "(?:[a-zA-Z_0-9][a-zA-Z_0-9-.]*)"; + //Concatenation of NAMESPACE_REGEX and TABLE_QUALIFIER_REGEX, + //with NAMESPACE_DELIM as delimiter + public static final String VALID_USER_TABLE_REGEX = + "(?:(?:(?:"+VALID_NAMESPACE_REGEX+"\\"+NAMESPACE_DELIM+")?)" + + "(?:"+VALID_TABLE_QUALIFIER_REGEX+"))"; + + /** The root table's name.*/ + public static final TableName ROOT_TABLE_NAME = + valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "root"); + + /** The META table's name. */ + public static final TableName META_TABLE_NAME = + valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta"); + + /** The Namespace table's name. */ + public static final TableName NAMESPACE_TABLE_NAME = + valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "namespace"); + + private byte[] name; + private String nameAsString; + private byte[] namespace; + private String namespaceAsString; + private byte[] qualifier; + private String qualifierAsString; + + private TableName() {} + + /** + * Check passed byte array, "tableName", is legal user-space table name. + * @return Returns passed tableName param + * @throws IllegalArgumentException if passed a tableName is null or + * is made of other than 'word' characters or underscores: i.e. + * [a-zA-Z_0-9.-:]. The ':' is used to delimit the namespace + * from the table name and can be used for nothing else. + * + * Namespace names can only contain 'word' characters + * [a-zA-Z_0-9] or '_' + * + * Qualifier names can only contain 'word' characters + * [a-zA-Z_0-9] or '_', '.' or '-'. + * The name may not start with '.' or '-'. + * + * Valid fully qualified table names: + * foo:bar, namespace=>foo, table=>bar + * org:foo.bar, namespace=org, table=>foo.bar + */ + public static byte [] isLegalFullyQualifiedTableName(final byte[] tableName) { + if (tableName == null || tableName.length <= 0) { + throw new IllegalArgumentException("Name is null or empty"); + } + int namespaceDelimIndex = com.google.common.primitives.Bytes.lastIndexOf(tableName, + (byte) NAMESPACE_DELIM); + if (namespaceDelimIndex == 0 || namespaceDelimIndex == -1){ + isLegalTableQualifierName(tableName); + } else { + isLegalNamespaceName(tableName, 0, namespaceDelimIndex); + isLegalTableQualifierName(tableName, namespaceDelimIndex + 1, tableName.length); + } + return tableName; + } + + public static void isLegalTableQualifierName(final byte[] qualifierName){ + isLegalTableQualifierName(qualifierName, 0, qualifierName.length); + } + + /** + * Qualifier names can only contain 'word' characters + * [a-zA-Z_0-9] or '_', '.' or '-'. + * The name may not start with '.' or '-'. 
+ * + * @param qualifierName byte array containing the qualifier name + * @param start start index + * @param end end index (exclusive) + */ + public static void isLegalTableQualifierName(final byte[] qualifierName, + int start, + int end){ + if(end - start < 1) { + throw new IllegalArgumentException("Table qualifier must not be empty"); + } + if (qualifierName[start] == '.' || qualifierName[start] == '-') { + throw new IllegalArgumentException("Illegal first character <" + qualifierName[0] + + "> at 0. Namespaces can only start with alphanumeric " + + "characters': i.e. [a-zA-Z_0-9]: " + Bytes.toString(qualifierName)); + } + for (int i = start; i < end; i++) { + if (Character.isLetterOrDigit(qualifierName[i]) || + qualifierName[i] == '_' || + qualifierName[i] == '-' || + qualifierName[i] == '.') { + continue; + } + throw new IllegalArgumentException("Illegal character <" + qualifierName[i] + + "> at " + i + ". User-space table qualifiers can only contain " + + "'alphanumeric characters': i.e. [a-zA-Z_0-9-.]: " + + Bytes.toString(qualifierName, start, end)); + } + } + + public static void isLegalNamespaceName(byte[] namespaceName) { + isLegalNamespaceName(namespaceName, 0, namespaceName.length); + } + + /** + * Valid namespace characters are [a-zA-Z_0-9] + * @param namespaceName + * @param offset + * @param length + */ + public static void isLegalNamespaceName(byte[] namespaceName, int offset, int length) { + for (int i = offset; i < length; i++) { + if (Character.isLetterOrDigit(namespaceName[i])|| namespaceName[i] == '_') { + continue; + } + throw new IllegalArgumentException("Illegal character <" + namespaceName[i] + + "> at " + i + ". Namespaces can only contain " + + "'alphanumeric characters': i.e. [a-zA-Z_0-9]: " + Bytes.toString(namespaceName, + offset, length)); + } + } + + public byte[] getName() { + return name; + } + + public String getNameAsString() { + return nameAsString; + } + + public byte[] getNamespace() { + return namespace; + } + + public String getNamespaceAsString() { + return namespaceAsString; + } + + public byte[] getQualifier() { + return qualifier; + } + + public String getQualifierAsString() { + return qualifierAsString; + } + + public byte[] toBytes() { + return name; + } + + @Override + public String toString() { + return nameAsString; + } + + public static TableName valueOf(byte[] namespace, byte[] qualifier) { + TableName ret = new TableName(); + if(namespace == null || namespace.length < 1) { + namespace = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME; + } + ret.namespace = namespace; + ret.namespaceAsString = Bytes.toString(namespace); + ret.qualifier = qualifier; + ret.qualifierAsString = Bytes.toString(qualifier); + + finishValueOf(ret); + + return ret; + } + + public static TableName valueOf(String namespaceAsString, String qualifierAsString) { + TableName ret = new TableName(); + if(namespaceAsString == null || namespaceAsString.length() < 1) { + namespaceAsString = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR; + } + ret.namespaceAsString = namespaceAsString; + ret.namespace = Bytes.toBytes(namespaceAsString); + ret.qualifier = Bytes.toBytes(qualifierAsString); + ret.qualifierAsString = qualifierAsString; + + finishValueOf(ret); + + return ret; + } + + private static void finishValueOf(TableName tableName) { + isLegalNamespaceName(tableName.namespace); + isLegalTableQualifierName(tableName.qualifier); + + tableName.nameAsString = + createFullyQualified(tableName.namespaceAsString, tableName.qualifierAsString); + tableName.name = 
Bytes.toBytes(tableName.nameAsString); + } + + public static TableName valueOf(byte[] name) { + return valueOf(Bytes.toString(name)); + } + + public static TableName valueOf(String name) { + isLegalFullyQualifiedTableName(Bytes.toBytes(name)); + int index = name.indexOf(NAMESPACE_DELIM); + if (index != -1) { + return TableName.valueOf(name.substring(0, index), name.substring(index + 1)); + } + return TableName.valueOf(NamespaceDescriptor.DEFAULT_NAMESPACE.getName(), name); + } + + private static String createFullyQualified(String namespace, String tableQualifier) { + if (namespace.equals(NamespaceDescriptor.DEFAULT_NAMESPACE.getName())) { + return tableQualifier; + } + return namespace+NAMESPACE_DELIM+tableQualifier; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + TableName tableName = (TableName) o; + + if (!nameAsString.equals(tableName.nameAsString)) return false; + + return true; + } + + @Override + public int hashCode() { + int result = nameAsString.hashCode(); + return result; + } + + @Override + public int compareTo(TableName tableName) { + return this.nameAsString.compareTo(tableName.getNameAsString()); + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java index 2921074..7f91c64 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java @@ -29,6 +29,7 @@ import java.math.BigDecimal; import java.math.BigInteger; import java.nio.ByteBuffer; import java.nio.ByteOrder; +import java.nio.charset.Charset; import java.security.AccessController; import java.security.PrivilegedAction; import java.security.SecureRandom; @@ -42,7 +43,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.io.RawComparator; import org.apache.hadoop.io.WritableComparator; @@ -61,6 +61,16 @@ import com.google.common.collect.Lists; @InterfaceAudience.Public @InterfaceStability.Stable public class Bytes { + //HConstants.UTF8_ENCODING should be updated if this changed + /** When we encode strings, we always specify UTF8 encoding */ + private static final String UTF8_ENCODING = "UTF-8"; + + //HConstants.UTF8_CHARSET should be updated if this changed + /** When we encode strings, we always specify UTF8 encoding */ + private static final Charset UTF8_CHARSET = Charset.forName(UTF8_ENCODING); + + //HConstants.EMPTY_BYTE_ARRAY should be updated if this changed + private static final byte [] EMPTY_BYTE_ARRAY = new byte [0]; private static final Log LOG = LogFactory.getLog(Bytes.class); @@ -341,7 +351,7 @@ public class Bytes { if (len == 0) { return ""; } - return new String(b, off, len, HConstants.UTF8_CHARSET); + return new String(b, off, len, UTF8_CHARSET); } /** @@ -463,7 +473,7 @@ public class Bytes { * @return the byte array */ public static byte[] toBytes(String s) { - return s.getBytes(HConstants.UTF8_CHARSET); + return s.getBytes(UTF8_CHARSET); } /** @@ -1295,7 +1305,7 @@ public class Bytes { * @return New array that has a in lower half and b in upper half. 
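The valueOf overloads normalize the naming forms listed in the class comment, so the default namespace never shows up in the printed name and illegal characters fail fast. For example:

    TableName a = TableName.valueOf("foo:bar");         // namespace "foo", qualifier "bar"
    TableName b = TableName.valueOf("bar");             // implicitly the "default" namespace
    TableName c = TableName.valueOf("default", "bar");  // explicit default namespace
    // b and c refer to the same table and print without the namespace prefix.
    assert b.equals(c) && "bar".equals(c.getNameAsString());
    assert "foo:bar".equals(a.getNameAsString());
    // Namespaces only allow [a-zA-Z_0-9], so this would throw IllegalArgumentException:
    // TableName.valueOf("bad.ns:t");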
*/ public static byte [] add(final byte [] a, final byte [] b) { - return add(a, b, HConstants.EMPTY_BYTE_ARRAY); + return add(a, b, EMPTY_BYTE_ARRAY); } /** diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java index 3381db6..45f6e6c 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java @@ -130,12 +130,16 @@ public class TestKeyValue extends TestCase { public void testMoreComparisons() throws Exception { // Root compares long now = System.currentTimeMillis(); - KeyValue a = new KeyValue(Bytes.toBytes(".META.,,99999999999999"), now); - KeyValue b = new KeyValue(Bytes.toBytes(".META.,,1"), now); + KeyValue a = new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,99999999999999"), now); + KeyValue b = new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now); KVComparator c = new KeyValue.RootComparator(); assertTrue(c.compare(b, a) < 0); - KeyValue aa = new KeyValue(Bytes.toBytes(".META.,,1"), now); - KeyValue bb = new KeyValue(Bytes.toBytes(".META.,,1"), + KeyValue aa = new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now); + KeyValue bb = new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1235943454602L, (byte[])null); assertTrue(c.compare(aa, bb) < 0); @@ -213,32 +217,46 @@ public class TestKeyValue extends TestCase { Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) < 0); - rowA = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/,1234,4321"), + rowA = new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",testtable,www.hbase.org/,1234,4321"), Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); - rowB = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/%20,99999,99999"), + rowB = new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",testtable,www.hbase.org/%20,99999,99999"), Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); assertTrue(KeyValue.ROOT_COMPARATOR.compare(rowA, rowB) < 0); } private void metacomparisons(final KeyValue.MetaComparator c) { long now = System.currentTimeMillis(); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now), - new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now)) == 0); - KeyValue a = new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now); - KeyValue b = new KeyValue(Bytes.toBytes(".META.,a,,0,2"), now); + assertTrue(c.compare(new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now), + new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now)) == 0); + KeyValue a = new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now); + KeyValue b = new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,2"), now); assertTrue(c.compare(a, b) < 0); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,a,,0,2"), now), - new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now)) > 0); + assertTrue(c.compare(new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,2"), now), + new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",a,,0,1"), now)) > 0); } 
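These KeyValue test fixtures compose meta row keys from the TableName constant, which under this patch renders as "hbase:meta" rather than the old ".META." literal; the pattern reduces to:

    long now = System.currentTimeMillis();
    // "hbase:meta,,1" -- the first meta row under the new naming.
    byte[] firstMetaRow = Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1");
    KeyValue kv = new KeyValue(firstMetaRow, now);   // row-only KeyValue, as used in the tests above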
private void comparisons(final KeyValue.KVComparator c) { long now = System.currentTimeMillis(); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,1"), now), - new KeyValue(Bytes.toBytes(".META.,,1"), now)) == 0); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,1"), now), - new KeyValue(Bytes.toBytes(".META.,,2"), now)) < 0); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,2"), now), - new KeyValue(Bytes.toBytes(".META.,,1"), now)) > 0); + assertTrue(c.compare(new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now), + new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now)) == 0); + assertTrue(c.compare(new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now), + new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,2"), now)) < 0); + assertTrue(c.compare(new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,2"), now), + new KeyValue( + Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,1"), now)) > 0); } public void testBinaryKeys() throws Exception { @@ -280,12 +298,12 @@ public class TestKeyValue extends TestCase { } // Make up -ROOT- table keys. KeyValue [] rootKeys = { - new KeyValue(Bytes.toBytes(".META.,aaaaa,\u0000\u0000,0,2"), fam, qf, 2, nb), - new KeyValue(Bytes.toBytes(".META.,aaaaa,\u0001,0,3"), fam, qf, 3, nb), - new KeyValue(Bytes.toBytes(".META.,aaaaa,,0,1"), fam, qf, 1, nb), - new KeyValue(Bytes.toBytes(".META.,aaaaa,\u1000,0,5"), fam, qf, 5, nb), - new KeyValue(Bytes.toBytes(".META.,aaaaa,a,0,4"), fam, qf, 4, nb), - new KeyValue(Bytes.toBytes(".META.,,0"), fam, qf, 0, nb), + new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",aaaaa,\u0000\u0000,0,2"), fam, qf, 2, nb), + new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",aaaaa,\u0001,0,3"), fam, qf, 3, nb), + new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",aaaaa,,0,1"), fam, qf, 1, nb), + new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",aaaaa,\u1000,0,5"), fam, qf, 5, nb), + new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",aaaaa,a,0,4"), fam, qf, 4, nb), + new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString()+",,0"), fam, qf, 0, nb), }; // This will output the keys incorrectly. 
set = new TreeSet(new KeyValue.MetaComparator()); @@ -561,4 +579,4 @@ public class TestKeyValue extends TestCase { assertTrue(keyComparator.compare(kv1.getKey(), newKey) < 0); assertTrue(keyComparator.compare(newKey, kv2.getKey()) == 0); } -} \ No newline at end of file +} diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java index c44d1c9..86564e1 100644 --- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java +++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestBulkDeleteProtocol.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -216,7 +217,7 @@ public class TestBulkDeleteProtocol { // @Ignore @Test public void testBulkDeleteFamily() throws Throwable { byte[] tableName = Bytes.toBytes("testBulkDeleteFamily"); - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); htd.addFamily(new HColumnDescriptor(FAMILY1)); htd.addFamily(new HColumnDescriptor(FAMILY2)); TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5); @@ -425,7 +426,7 @@ public class TestBulkDeleteProtocol { } private HTable createTable(byte[] tableName) throws IOException { - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); HColumnDescriptor hcd = new HColumnDescriptor(FAMILY1); hcd.setMaxVersions(10);// Just setting 10 as I am not testing with more than 10 versions here htd.addFamily(hcd); diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java index 755acbf..4ab53c7 100644 --- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java +++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/coprocessor/example/TestZooKeeperScanPolicyObserver.java @@ -23,12 +23,12 @@ import static org.junit.Assert.assertEquals; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -38,10 +38,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.ZooKeeper; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.Ignore; import 
org.junit.experimental.categories.Category; @Category(MediumTests.class) @@ -70,7 +66,8 @@ public class TestZooKeeperScanPolicyObserver { // @Ignore @Test public void testScanPolicyObserver() throws Exception { - byte[] tableName = Bytes.toBytes("testScanPolicyObserver"); + TableName tableName = + TableName.valueOf("testScanPolicyObserver"); HTableDescriptor desc = new HTableDescriptor(tableName); HColumnDescriptor hcd = new HColumnDescriptor(F) .setMaxVersions(10) diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java index 25c9c89..516382e 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestLazyCfLoading.java @@ -55,7 +55,8 @@ import org.junit.experimental.categories.Category; */ @Category(IntegrationTests.class) public class IntegrationTestLazyCfLoading { - private static final String TABLE_NAME = IntegrationTestLazyCfLoading.class.getSimpleName(); + private static final TableName TABLE_NAME = + TableName.valueOf(IntegrationTestLazyCfLoading.class.getSimpleName()); private static final String TIMEOUT_KEY = "hbase.%s.timeout"; private static final String ENCODING_KEY = "hbase.%s.datablock.encoding"; @@ -183,7 +184,7 @@ public class IntegrationTestLazyCfLoading { Configuration conf = util.getConfiguration(); String encodingKey = String.format(ENCODING_KEY, this.getClass().getSimpleName()); DataBlockEncoding blockEncoding = DataBlockEncoding.valueOf(conf.get(encodingKey, "FAST_DIFF")); - HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes(TABLE_NAME)); + HTableDescriptor htd = new HTableDescriptor(TABLE_NAME); for (byte[] cf : dataGen.getColumnFamilies()) { HColumnDescriptor hcd = new HColumnDescriptor(cf); hcd.setDataBlockEncoding(blockEncoding); @@ -221,12 +222,12 @@ public class IntegrationTestLazyCfLoading { long maxRuntime = conf.getLong(timeoutKey, DEFAULT_TIMEOUT_MINUTES); long serverCount = util.getHBaseClusterInterface().getClusterStatus().getServersSize(); long keysToWrite = serverCount * KEYS_TO_WRITE_PER_SERVER; - HTable table = new HTable(conf, Bytes.toBytes(TABLE_NAME)); + HTable table = new HTable(conf, TABLE_NAME); // Create multi-threaded writer and start it. We write multiple columns/CFs and verify // their integrity, therefore multi-put is necessary. 
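The test conversions above all follow one recipe: declare the table as a TableName up front and hand that same object to HTableDescriptor, HTable, and the admin. Condensed, with FAMILY standing in for whichever column-family constant a given test uses, and admin/conf assumed to exist already:

    TableName tableName = TableName.valueOf("testScanPolicyObserver");
    HTableDescriptor desc = new HTableDescriptor(tableName);   // byte[]-based constructor replaced by this patch
    desc.addFamily(new HColumnDescriptor(FAMILY));             // FAMILY: placeholder byte[] family name
    admin.createTable(desc);                                   // admin: an existing HBaseAdmin
    HTable table = new HTable(conf, tableName);                // HTable accepts a TableName directly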
MultiThreadedWriter writer = - new MultiThreadedWriter(dataGen, conf, Bytes.toBytes(TABLE_NAME)); + new MultiThreadedWriter(dataGen, conf, TABLE_NAME); writer.setMultiPut(true); LOG.info("Starting writer; the number of keys to write is " + keysToWrite); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java index d3d4aad..beded5e 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestManyRegions.java @@ -137,7 +137,7 @@ public class IntegrationTestManyRegions { @Override public void run() { long startTime, endTime; - HTableDescriptor desc = new HTableDescriptor(TABLE_NAME); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLE_NAME)); desc.addFamily(new HColumnDescriptor(COLUMN_NAME)); SplitAlgorithm algo = new RegionSplitter.HexStringSplit(); byte[][] splits = algo.split(REGION_COUNT); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java index 2b4318d..a4feca5 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.IntegrationTestingUtility; import org.apache.hadoop.hbase.IntegrationTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; @@ -107,10 +108,8 @@ public class IntegrationTestMTTR { /** * Configurable table names. */ - private static String tableName; - private static byte[] tableNameBytes; - private static String loadTableName; - private static byte[] loadTableNameBytes; + private static TableName tableName; + private static TableName loadTableName; /** * Util to get at the cluster. @@ -164,13 +163,13 @@ public class IntegrationTestMTTR { private static void setupActions() throws IOException { // Set up the action that will restart a region server holding a region from our table // because this table should only have one region we should be good. - restartRSAction = new ChaosMonkey.RestartRsHoldingTable(SLEEP_TIME, tableName); + restartRSAction = new ChaosMonkey.RestartRsHoldingTable(SLEEP_TIME, tableName.getNameAsString()); // Set up the action that will kill the region holding meta. restartMetaAction = new ChaosMonkey.RestartRsHoldingMeta(SLEEP_TIME); // Set up the action that will move the regions of our table. - moveRegionAction = new ChaosMonkey.MoveRegionsOfTable(SLEEP_TIME, tableName); + moveRegionAction = new ChaosMonkey.MoveRegionsOfTable(SLEEP_TIME, tableName.getNameAsString()); // Kill the master restartMasterAction = new ChaosMonkey.RestartActiveMaster(1000); @@ -185,24 +184,22 @@ public class IntegrationTestMTTR { private static void setupTables() throws IOException { // Get the table name. 
- tableName = util.getConfiguration() - .get("hbase.IntegrationTestMTTR.tableName", "IntegrationTestMTTR"); - tableNameBytes = Bytes.toBytes(tableName); + tableName = TableName.valueOf(util.getConfiguration() + .get("hbase.IntegrationTestMTTR.tableName", "IntegrationTestMTTR")); - loadTableName = util.getConfiguration() - .get("hbase.IntegrationTestMTTR.loadTableName", "IntegrationTestMTTRLoadTestTool"); - loadTableNameBytes = Bytes.toBytes(loadTableName); + loadTableName = TableName.valueOf(util.getConfiguration() + .get("hbase.IntegrationTestMTTR.loadTableName", "IntegrationTestMTTRLoadTestTool")); - if (util.getHBaseAdmin().tableExists(tableNameBytes)) { - util.deleteTable(tableNameBytes); + if (util.getHBaseAdmin().tableExists(tableName)) { + util.deleteTable(tableName); } - if (util.getHBaseAdmin().tableExists(loadTableNameBytes)) { - util.deleteTable(loadTableNameBytes); + if (util.getHBaseAdmin().tableExists(loadTableName)) { + util.deleteTable(loadTableName); } // Create the table. If this fails then fail everything. - HTableDescriptor tableDescriptor = new HTableDescriptor(tableNameBytes); + HTableDescriptor tableDescriptor = new HTableDescriptor(tableName); // Make the max file size huge so that splits don't happen during the test. tableDescriptor.setMaxFileSize(Long.MAX_VALUE); @@ -213,7 +210,7 @@ public class IntegrationTestMTTR { util.getHBaseAdmin().createTable(tableDescriptor); // Setup the table for LoadTestTool - int ret = loadTool.run(new String[]{"-tn", loadTableName, "-init_only"}); + int ret = loadTool.run(new String[]{"-tn", loadTableName.getNameAsString(), "-init_only"}); assertEquals("Failed to initialize LoadTestTool", 0, ret); } @@ -400,7 +397,7 @@ public class IntegrationTestMTTR { public PutCallable(Future f) throws IOException { super(f); - this.table = new HTable(util.getConfiguration(), tableNameBytes); + this.table = new HTable(util.getConfiguration(), tableName); } @Override @@ -427,7 +424,7 @@ public class IntegrationTestMTTR { public ScanCallable(Future f) throws IOException { super(f); - this.table = new HTable(util.getConfiguration(), tableNameBytes); + this.table = new HTable(util.getConfiguration(), tableName); } @Override @@ -517,7 +514,7 @@ public class IntegrationTestMTTR { // But always go in just in case some action completes quickly do { int ret = loadTool.run(new String[]{ - "-tn", loadTableName, + "-tn", loadTableName.getNameAsString(), "-write", String.format("%d:%d:%d", colsPerKey, recordSize, writeThreads), "-num_keys", String.valueOf(numKeys), "-skip_init" diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java index 5992cd4..aeebfe6 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java @@ -37,6 +37,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; @@ -430,7 +431,7 @@ public class IntegrationTestBigLinkedList extends Configured implements Tool { protected void createSchema() throws IOException { HBaseAdmin admin = new HBaseAdmin(getConf()); - byte[] tableName = 
getTableName(getConf()); + TableName tableName = getTableName(getConf()); if (!admin.tableExists(tableName)) { HTableDescriptor htd = new HTableDescriptor(getTableName(getConf())); htd.addFamily(new HColumnDescriptor(FAMILY_NAME)); @@ -630,7 +631,7 @@ public class IntegrationTestBigLinkedList extends Configured implements Tool { scan.setCaching(10000); scan.setCacheBlocks(false); - TableMapReduceUtil.initTableMapperJob(getTableName(getConf()), scan, + TableMapReduceUtil.initTableMapperJob(getTableName(getConf()).getName(), scan, VerifyMapper.class, BytesWritable.class, BytesWritable.class, job); job.getConfiguration().setBoolean("mapred.map.tasks.speculative.execution", false); @@ -942,8 +943,8 @@ public class IntegrationTestBigLinkedList extends Configured implements Tool { } } - static byte[] getTableName(Configuration conf) { - return Bytes.toBytes(conf.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME)); + static TableName getTableName(Configuration conf) { + return TableName.valueOf(conf.get(TABLE_NAME_KEY, DEFAULT_TABLE_NAME)); } private static CINode getCINode(Result result, CINode node) { diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java index 3bf7f24..b827b67 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestLoadAndVerify.java @@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -295,10 +296,10 @@ public class IntegrationTestLoadAndVerify extends Configured implements Tool { Path outputDir = getTestDir(TEST_NAME, "load-output"); NMapInputFormat.setNumMapTasks(conf, conf.getInt(NUM_MAP_TASKS_KEY, NUM_MAP_TASKS_DEFAULT)); - conf.set(TABLE_NAME_KEY, htd.getNameAsString()); + conf.set(TABLE_NAME_KEY, htd.getTableName().getNameAsString()); Job job = new Job(conf); - job.setJobName(TEST_NAME + " Load for " + htd.getNameAsString()); + job.setJobName(TEST_NAME + " Load for " + htd.getTableName()); job.setJarByClass(this.getClass()); job.setMapperClass(LoadMapper.class); job.setInputFormatClass(NMapInputFormat.class); @@ -317,12 +318,12 @@ public class IntegrationTestLoadAndVerify extends Configured implements Tool { Job job = new Job(conf); job.setJarByClass(this.getClass()); - job.setJobName(TEST_NAME + " Verification for " + htd.getNameAsString()); + job.setJobName(TEST_NAME + " Verification for " + htd.getTableName()); Scan scan = new Scan(); TableMapReduceUtil.initTableMapperJob( - htd.getNameAsString(), scan, VerifyMapper.class, + htd.getTableName().getNameAsString(), scan, VerifyMapper.class, BytesWritable.class, BytesWritable.class, job); int scannerCaching = conf.getInt("verify.scannercaching", SCANNER_CACHING); TableMapReduceUtil.setScannerCaching(job, scannerCaching); @@ -349,7 +350,7 @@ public class IntegrationTestLoadAndVerify extends Configured implements Tool { @Test public void testLoadAndVerify() throws Exception { - HTableDescriptor htd = new HTableDescriptor(TEST_NAME); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TEST_NAME)); htd.addFamily(new HColumnDescriptor(TEST_FAMILY)); HBaseAdmin admin = 
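Rather than keeping parallel String and byte[] copies, the integration tests now parse one TableName from configuration and only fall back to its string or byte form at boundaries that still expect those types; roughly (the configuration key and default are made up for illustration):

    TableName tableName = TableName.valueOf(
        conf.get("hbase.test.tableName", "IntegrationTest"));  // illustrative key and default
    HBaseAdmin admin = new HBaseAdmin(conf);
    boolean exists = admin.tableExists(tableName);             // TableName overload used above
    // Older entry points still take strings or bytes:
    String forLoadTool  = tableName.getNameAsString();         // e.g. LoadTestTool "-tn"
    byte[] forMapReduce = tableName.getName();                 // e.g. TableMapReduceUtil.initTableMapperJob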
getTestingUtil().getHBaseAdmin(); @@ -367,20 +368,20 @@ public class IntegrationTestLoadAndVerify extends Configured implements Tool { private void deleteTable(HBaseAdmin admin, HTableDescriptor htd) throws IOException, InterruptedException { // Use disableTestAsync because disable can take a long time to complete - System.out.print("Disabling table " + htd.getNameAsString() +" "); - admin.disableTableAsync(htd.getName()); + System.out.print("Disabling table " + htd.getTableName() +" "); + admin.disableTableAsync(htd.getTableName()); long start = System.currentTimeMillis(); // NOTE tables can be both admin.isTableEnabled=false and // isTableDisabled=false, when disabling must use isTableDisabled! - while (!admin.isTableDisabled(htd.getName())) { + while (!admin.isTableDisabled(htd.getTableName())) { System.out.print("."); Thread.sleep(1000); } long delta = System.currentTimeMillis() - start; System.out.println(" " + delta +" ms"); - System.out.println("Deleting table " + htd.getNameAsString() +" "); - admin.deleteTable(htd.getName()); + System.out.println("Deleting table " + htd.getTableName() +" "); + admin.deleteTable(htd.getTableName()); } public void usage() { @@ -424,7 +425,7 @@ public class IntegrationTestLoadAndVerify extends Configured implements Tool { // create HTableDescriptor for specified table String table = getConf().get(TABLE_NAME_KEY, TEST_NAME); - HTableDescriptor htd = new HTableDescriptor(table); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table)); htd.addFamily(new HColumnDescriptor(TEST_FAMILY)); HBaseAdmin admin = new HBaseAdmin(getConf()); diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AccessControlProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AccessControlProtos.java index 12b3d8c..92a0fd6 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AccessControlProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AccessControlProtos.java @@ -16,9 +16,10 @@ public final class AccessControlProtos { int getActionCount(); org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission.Action getAction(int index); - // optional bytes table = 2; - boolean hasTable(); - com.google.protobuf.ByteString getTable(); + // optional .TableName tableName = 2; + boolean hasTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); // optional bytes family = 3; boolean hasFamily(); @@ -148,14 +149,17 @@ public final class AccessControlProtos { return action_.get(index); } - // optional bytes table = 2; - public static final int TABLE_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString table_; - public boolean hasTable() { + // optional .TableName tableName = 2; + public static final int TABLENAME_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTable() { - return table_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; } // optional bytes family = 3; @@ -180,7 +184,7 @@ public final class 
AccessControlProtos { private void initFields() { action_ = java.util.Collections.emptyList(); - table_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); family_ = com.google.protobuf.ByteString.EMPTY; qualifier_ = com.google.protobuf.ByteString.EMPTY; } @@ -189,6 +193,12 @@ public final class AccessControlProtos { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (hasTableName()) { + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -200,7 +210,7 @@ public final class AccessControlProtos { output.writeEnum(1, action_.get(i).getNumber()); } if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(2, table_); + output.writeMessage(2, tableName_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(3, family_); @@ -228,7 +238,7 @@ public final class AccessControlProtos { } if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, table_); + .computeMessageSize(2, tableName_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream @@ -263,10 +273,10 @@ public final class AccessControlProtos { boolean result = true; result = result && getActionList() .equals(other.getActionList()); - result = result && (hasTable() == other.hasTable()); - if (hasTable()) { - result = result && getTable() - .equals(other.getTable()); + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); } result = result && (hasFamily() == other.hasFamily()); if (hasFamily()) { @@ -291,9 +301,9 @@ public final class AccessControlProtos { hash = (37 * hash) + ACTION_FIELD_NUMBER; hash = (53 * hash) + hashEnumList(getActionList()); } - if (hasTable()) { - hash = (37 * hash) + TABLE_FIELD_NUMBER; - hash = (53 * hash) + getTable().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLENAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); } if (hasFamily()) { hash = (37 * hash) + FAMILY_FIELD_NUMBER; @@ -411,6 +421,7 @@ public final class AccessControlProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); } } private static Builder create() { @@ -421,7 +432,11 @@ public final class AccessControlProtos { super.clear(); action_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); - table_ = com.google.protobuf.ByteString.EMPTY; + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000002); family_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000004); @@ -473,7 +488,11 @@ public final class AccessControlProtos { if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000001; } - result.table_ = table_; + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000002; } @@ -508,8 +527,8 @@ public final class AccessControlProtos { } onChanged(); } - if (other.hasTable()) { - 
setTable(other.getTable()); + if (other.hasTableName()) { + mergeTableName(other.getTableName()); } if (other.hasFamily()) { setFamily(other.getFamily()); @@ -522,6 +541,12 @@ public final class AccessControlProtos { } public final boolean isInitialized() { + if (hasTableName()) { + if (!getTableName().isInitialized()) { + + return false; + } + } return true; } @@ -574,8 +599,12 @@ public final class AccessControlProtos { break; } case 18: { - bitField0_ |= 0x00000002; - table_ = input.readBytes(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + if (hasTableName()) { + subBuilder.mergeFrom(getTableName()); + } + input.readMessage(subBuilder, extensionRegistry); + setTableName(subBuilder.buildPartial()); break; } case 26: { @@ -645,29 +674,95 @@ public final class AccessControlProtos { return this; } - // optional bytes table = 2; - private com.google.protobuf.ByteString table_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasTable() { + // optional .TableName tableName = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + public boolean hasTableName() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public com.google.protobuf.ByteString getTable() { - return table_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } } - public Builder setTable(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - table_ = value; - onChanged(); + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; return this; } - public Builder clearTable() { + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000002); - table_ = getDefaultInstance().getTable(); - onChanged(); return this; } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } // optional bytes family = 3; private com.google.protobuf.ByteString family_ = com.google.protobuf.ByteString.EMPTY; @@ -809,6 +904,10 @@ public final class AccessControlProtos { memoizedIsInitialized = 0; return false; } + if (!getPermission().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -1099,6 +1198,10 @@ public final class AccessControlProtos { return false; } + if (!getPermission().isInitialized()) { + + return false; + } return true; } @@ -1401,6 +1504,12 @@ public final class AccessControlProtos { memoizedIsInitialized = 0; return false; } + for (int i = 0; i < getPermissionsCount(); i++) { + if (!getPermissions(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -1708,6 +1817,12 @@ public final class AccessControlProtos { return false; } + for (int i = 0; i < getPermissionsCount(); i++) { + if (!getPermissions(i).isInitialized()) { + + return false; + } + } return true; } @@ -4073,9 +4188,10 @@ public final class AccessControlProtos { public interface UserPermissionsRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional bytes table = 1; - boolean hasTable(); - com.google.protobuf.ByteString getTable(); + // optional .TableName tableName = 1; + boolean hasTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); } public static final class UserPermissionsRequest extends com.google.protobuf.GeneratedMessage @@ -4106,24 +4222,33 @@ public final class AccessControlProtos { } private int bitField0_; - // optional bytes table = 1; - public static final int TABLE_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString table_; - public boolean hasTable() { + // optional .TableName tableName = 1; + public static final int TABLENAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + public boolean hasTableName() { return ((bitField0_ & 
0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTable() { - return table_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; } private void initFields() { - table_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (hasTableName()) { + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -4132,7 +4257,7 @@ public final class AccessControlProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, table_); + output.writeMessage(1, tableName_); } getUnknownFields().writeTo(output); } @@ -4145,7 +4270,7 @@ public final class AccessControlProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, table_); + .computeMessageSize(1, tableName_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -4170,10 +4295,10 @@ public final class AccessControlProtos { org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsRequest other = (org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsRequest) obj; boolean result = true; - result = result && (hasTable() == other.hasTable()); - if (hasTable()) { - result = result && getTable() - .equals(other.getTable()); + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -4184,9 +4309,9 @@ public final class AccessControlProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasTable()) { - hash = (37 * hash) + TABLE_FIELD_NUMBER; - hash = (53 * hash) + getTable().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLENAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; @@ -4296,6 +4421,7 @@ public final class AccessControlProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); } } private static Builder create() { @@ -4304,7 +4430,11 @@ public final class AccessControlProtos { public Builder clear() { super.clear(); - table_ = com.google.protobuf.ByteString.EMPTY; + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -4347,7 +4477,11 @@ public final class AccessControlProtos { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.table_ = table_; + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return 
result; @@ -4364,14 +4498,20 @@ public final class AccessControlProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsRequest.getDefaultInstance()) return this; - if (other.hasTable()) { - setTable(other.getTable()); + if (other.hasTableName()) { + mergeTableName(other.getTableName()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + if (hasTableName()) { + if (!getTableName().isInitialized()) { + + return false; + } + } return true; } @@ -4399,8 +4539,12 @@ public final class AccessControlProtos { break; } case 10: { - bitField0_ |= 0x00000001; - table_ = input.readBytes(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + if (hasTableName()) { + subBuilder.mergeFrom(getTableName()); + } + input.readMessage(subBuilder, extensionRegistry); + setTableName(subBuilder.buildPartial()); break; } } @@ -4409,29 +4553,95 @@ public final class AccessControlProtos { private int bitField0_; - // optional bytes table = 1; - private com.google.protobuf.ByteString table_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasTable() { + // optional .TableName tableName = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTable() { - return table_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } } - public Builder setTable(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - table_ = value; - onChanged(); + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; return this; } - public Builder clearTable() { + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - table_ = getDefaultInstance().getTable(); - onChanged(); return this; } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } // @@protoc_insertion_point(builder_scope:UserPermissionsRequest) } @@ -5107,6 +5317,12 @@ public final class AccessControlProtos { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + for (int i = 0; i < getPermissionCount(); i++) { + if (!getPermission(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -5383,6 +5599,12 @@ public final class AccessControlProtos { } public final boolean isInitialized() { + for (int i = 0; i < getPermissionCount(); i++) { + if (!getPermission(i).isInitialized()) { + + return false; + } + } return true; } @@ -6403,33 +6625,34 @@ public final class AccessControlProtos { descriptor; static { java.lang.String[] descriptorData = { - "\n\023AccessControl.proto\"\242\001\n\nPermission\022\"\n\006" + - "action\030\001 \003(\0162\022.Permission.Action\022\r\n\005tabl" + - "e\030\002 \001(\014\022\016\n\006family\030\003 \001(\014\022\021\n\tqualifier\030\004 \001" + - "(\014\">\n\006Action\022\010\n\004READ\020\000\022\t\n\005WRITE\020\001\022\010\n\004EXE" + - "C\020\002\022\n\n\006CREATE\020\003\022\t\n\005ADMIN\020\004\"?\n\016UserPermis" + - "sion\022\014\n\004user\030\001 \002(\014\022\037\n\npermission\030\002 \002(\0132\013" + - ".Permission\"\225\001\n\024UserTablePermissions\022:\n\013" + - "permissions\030\001 \003(\0132%.UserTablePermissions" + - ".UserPermissions\032A\n\017UserPermissions\022\014\n\004u" + - "ser\030\001 \002(\014\022 \n\013permissions\030\002 \003(\0132\013.Permiss", - "ion\"3\n\014GrantRequest\022#\n\npermission\030\001 \002(\0132" + - "\017.UserPermission\"\017\n\rGrantResponse\"4\n\rRev" + - "okeRequest\022#\n\npermission\030\001 \002(\0132\017.UserPer" + - 
"mission\"\020\n\016RevokeResponse\"\'\n\026UserPermiss" + - "ionsRequest\022\r\n\005table\030\001 \001(\014\">\n\027UserPermis" + - "sionsResponse\022#\n\npermission\030\001 \003(\0132\017.User" + - "Permission\":\n\027CheckPermissionsRequest\022\037\n" + - "\npermission\030\001 \003(\0132\013.Permission\"\032\n\030CheckP" + - "ermissionsResponse2\373\001\n\024AccessControlServ" + - "ice\022&\n\005Grant\022\r.GrantRequest\032\016.GrantRespo", - "nse\022)\n\006Revoke\022\016.RevokeRequest\032\017.RevokeRe" + - "sponse\022G\n\022GetUserPermissions\022\027.UserPermi" + - "ssionsRequest\032\030.UserPermissionsResponse\022" + - "G\n\020CheckPermissions\022\030.CheckPermissionsRe" + - "quest\032\031.CheckPermissionsResponseBI\n*org." + - "apache.hadoop.hbase.protobuf.generatedB\023" + - "AccessControlProtosH\001\210\001\001\240\001\001" + "\n\023AccessControl.proto\032\013hbase.proto\"\262\001\n\nP" + + "ermission\022\"\n\006action\030\001 \003(\0162\022.Permission.A" + + "ction\022\035\n\ttableName\030\002 \001(\0132\n.TableName\022\016\n\006" + + "family\030\003 \001(\014\022\021\n\tqualifier\030\004 \001(\014\">\n\006Actio" + + "n\022\010\n\004READ\020\000\022\t\n\005WRITE\020\001\022\010\n\004EXEC\020\002\022\n\n\006CREA" + + "TE\020\003\022\t\n\005ADMIN\020\004\"?\n\016UserPermission\022\014\n\004use" + + "r\030\001 \002(\014\022\037\n\npermission\030\002 \002(\0132\013.Permission" + + "\"\225\001\n\024UserTablePermissions\022:\n\013permissions" + + "\030\001 \003(\0132%.UserTablePermissions.UserPermis" + + "sions\032A\n\017UserPermissions\022\014\n\004user\030\001 \002(\014\022 ", + "\n\013permissions\030\002 \003(\0132\013.Permission\"3\n\014Gran" + + "tRequest\022#\n\npermission\030\001 \002(\0132\017.UserPermi" + + "ssion\"\017\n\rGrantResponse\"4\n\rRevokeRequest\022" + + "#\n\npermission\030\001 \002(\0132\017.UserPermission\"\020\n\016" + + "RevokeResponse\"7\n\026UserPermissionsRequest" + + "\022\035\n\ttableName\030\001 \001(\0132\n.TableName\">\n\027UserP" + + "ermissionsResponse\022#\n\npermission\030\001 \003(\0132\017" + + ".UserPermission\":\n\027CheckPermissionsReque" + + "st\022\037\n\npermission\030\001 \003(\0132\013.Permission\"\032\n\030C" + + "heckPermissionsResponse2\373\001\n\024AccessContro", + "lService\022&\n\005Grant\022\r.GrantRequest\032\016.Grant" + + "Response\022)\n\006Revoke\022\016.RevokeRequest\032\017.Rev" + + "okeResponse\022G\n\022GetUserPermissions\022\027.User" + + "PermissionsRequest\032\030.UserPermissionsResp" + + "onse\022G\n\020CheckPermissions\022\030.CheckPermissi" + + "onsRequest\032\031.CheckPermissionsResponseBI\n" + + "*org.apache.hadoop.hbase.protobuf.genera" + + "tedB\023AccessControlProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -6441,7 +6664,7 @@ public final class AccessControlProtos { internal_static_Permission_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Permission_descriptor, - new java.lang.String[] { "Action", "Table", "Family", "Qualifier", }, + new java.lang.String[] { "Action", "TableName", "Family", "Qualifier", }, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission.class, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission.Builder.class); internal_static_UserPermission_descriptor = @@ -6505,7 +6728,7 @@ public final class AccessControlProtos { 
internal_static_UserPermissionsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UserPermissionsRequest_descriptor, - new java.lang.String[] { "Table", }, + new java.lang.String[] { "TableName", }, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsRequest.class, org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermissionsRequest.Builder.class); internal_static_UserPermissionsResponse_descriptor = @@ -6538,6 +6761,7 @@ public final class AccessControlProtos { com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), }, assigner); } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java index f4bceeb..f520368 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java @@ -92,173 +92,82 @@ public final class HBaseProtos { // @@protoc_insertion_point(enum_scope:CompareType) } - public interface TableSchemaOrBuilder + public interface TableNameOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional bytes name = 1; - boolean hasName(); - com.google.protobuf.ByteString getName(); - - // repeated .BytesBytesPair attributes = 2; - java.util.List - getAttributesList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index); - int getAttributesCount(); - java.util.List - getAttributesOrBuilderList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( - int index); - - // repeated .ColumnFamilySchema column_families = 3; - java.util.List - getColumnFamiliesList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnFamilies(int index); - int getColumnFamiliesCount(); - java.util.List - getColumnFamiliesOrBuilderList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnFamiliesOrBuilder( - int index); + // required bytes namespace = 1; + boolean hasNamespace(); + com.google.protobuf.ByteString getNamespace(); - // repeated .NameStringPair configuration = 4; - java.util.List - getConfigurationList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index); - int getConfigurationCount(); - java.util.List - getConfigurationOrBuilderList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( - int index); + // required bytes qualifier = 2; + boolean hasQualifier(); + com.google.protobuf.ByteString getQualifier(); } - public static final class TableSchema extends + public static final class TableName extends com.google.protobuf.GeneratedMessage - implements TableSchemaOrBuilder { - // Use TableSchema.newBuilder() to construct. - private TableSchema(Builder builder) { + implements TableNameOrBuilder { + // Use TableName.newBuilder() to construct. 
+ private TableName(Builder builder) { super(builder); } - private TableSchema(boolean noInit) {} + private TableName(boolean noInit) {} - private static final TableSchema defaultInstance; - public static TableSchema getDefaultInstance() { + private static final TableName defaultInstance; + public static TableName getDefaultInstance() { return defaultInstance; } - public TableSchema getDefaultInstanceForType() { + public TableName getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableName_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableName_fieldAccessorTable; } private int bitField0_; - // optional bytes name = 1; - public static final int NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString name_; - public boolean hasName() { + // required bytes namespace = 1; + public static final int NAMESPACE_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString namespace_; + public boolean hasNamespace() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getName() { - return name_; - } - - // repeated .BytesBytesPair attributes = 2; - public static final int ATTRIBUTES_FIELD_NUMBER = 2; - private java.util.List attributes_; - public java.util.List getAttributesList() { - return attributes_; - } - public java.util.List - getAttributesOrBuilderList() { - return attributes_; - } - public int getAttributesCount() { - return attributes_.size(); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index) { - return attributes_.get(index); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( - int index) { - return attributes_.get(index); - } - - // repeated .ColumnFamilySchema column_families = 3; - public static final int COLUMN_FAMILIES_FIELD_NUMBER = 3; - private java.util.List columnFamilies_; - public java.util.List getColumnFamiliesList() { - return columnFamilies_; - } - public java.util.List - getColumnFamiliesOrBuilderList() { - return columnFamilies_; - } - public int getColumnFamiliesCount() { - return columnFamilies_.size(); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnFamilies(int index) { - return columnFamilies_.get(index); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnFamiliesOrBuilder( - int index) { - return columnFamilies_.get(index); + public com.google.protobuf.ByteString getNamespace() { + return namespace_; } - // repeated .NameStringPair configuration = 4; - public static final int CONFIGURATION_FIELD_NUMBER = 4; - private java.util.List configuration_; - public java.util.List getConfigurationList() { - return configuration_; - } - public java.util.List - getConfigurationOrBuilderList() { - return configuration_; - } - public int getConfigurationCount() { - return configuration_.size(); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair 
getConfiguration(int index) { - return configuration_.get(index); + // required bytes qualifier = 2; + public static final int QUALIFIER_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString qualifier_; + public boolean hasQualifier() { + return ((bitField0_ & 0x00000002) == 0x00000002); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( - int index) { - return configuration_.get(index); + public com.google.protobuf.ByteString getQualifier() { + return qualifier_; } private void initFields() { - name_ = com.google.protobuf.ByteString.EMPTY; - attributes_ = java.util.Collections.emptyList(); - columnFamilies_ = java.util.Collections.emptyList(); - configuration_ = java.util.Collections.emptyList(); + namespace_ = com.google.protobuf.ByteString.EMPTY; + qualifier_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getAttributesCount(); i++) { - if (!getAttributes(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - for (int i = 0; i < getColumnFamiliesCount(); i++) { - if (!getColumnFamilies(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!hasNamespace()) { + memoizedIsInitialized = 0; + return false; } - for (int i = 0; i < getConfigurationCount(); i++) { - if (!getConfiguration(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!hasQualifier()) { + memoizedIsInitialized = 0; + return false; } memoizedIsInitialized = 1; return true; @@ -268,16 +177,10 @@ public final class HBaseProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, name_); - } - for (int i = 0; i < attributes_.size(); i++) { - output.writeMessage(2, attributes_.get(i)); + output.writeBytes(1, namespace_); } - for (int i = 0; i < columnFamilies_.size(); i++) { - output.writeMessage(3, columnFamilies_.get(i)); - } - for (int i = 0; i < configuration_.size(); i++) { - output.writeMessage(4, configuration_.get(i)); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, qualifier_); } getUnknownFields().writeTo(output); } @@ -290,19 +193,11 @@ public final class HBaseProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, name_); - } - for (int i = 0; i < attributes_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, attributes_.get(i)); - } - for (int i = 0; i < columnFamilies_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, columnFamilies_.get(i)); + .computeBytesSize(1, namespace_); } - for (int i = 0; i < configuration_.size(); i++) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, configuration_.get(i)); + .computeBytesSize(2, qualifier_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -321,23 +216,22 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName)) { return super.equals(obj); } - 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName) obj; boolean result = true; - result = result && (hasName() == other.hasName()); - if (hasName()) { - result = result && getName() - .equals(other.getName()); + result = result && (hasNamespace() == other.hasNamespace()); + if (hasNamespace()) { + result = result && getNamespace() + .equals(other.getNamespace()); + } + result = result && (hasQualifier() == other.hasQualifier()); + if (hasQualifier()) { + result = result && getQualifier() + .equals(other.getQualifier()); } - result = result && getAttributesList() - .equals(other.getAttributesList()); - result = result && getColumnFamiliesList() - .equals(other.getColumnFamiliesList()); - result = result && getConfigurationList() - .equals(other.getConfigurationList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -347,61 +241,53 @@ public final class HBaseProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasName()) { - hash = (37 * hash) + NAME_FIELD_NUMBER; - hash = (53 * hash) + getName().hashCode(); - } - if (getAttributesCount() > 0) { - hash = (37 * hash) + ATTRIBUTES_FIELD_NUMBER; - hash = (53 * hash) + getAttributesList().hashCode(); - } - if (getColumnFamiliesCount() > 0) { - hash = (37 * hash) + COLUMN_FAMILIES_FIELD_NUMBER; - hash = (53 * hash) + getColumnFamiliesList().hashCode(); + if (hasNamespace()) { + hash = (37 * hash) + NAMESPACE_FIELD_NUMBER; + hash = (53 * hash) + getNamespace().hashCode(); } - if (getConfigurationCount() > 0) { - hash = (37 * hash) + CONFIGURATION_FIELD_NUMBER; - hash = (53 * hash) + getConfigurationList().hashCode(); + if (hasQualifier()) { + hash = (37 * hash) + QUALIFIER_FIELD_NUMBER; + hash = (53 * hash) + getQualifier().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, 
extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -410,7 +296,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -421,12 +307,12 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -436,7 +322,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -449,18 +335,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableName_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_fieldAccessorTable; + return 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableName_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -471,9 +357,6 @@ public final class HBaseProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getAttributesFieldBuilder(); - getColumnFamiliesFieldBuilder(); - getConfigurationFieldBuilder(); } } private static Builder create() { @@ -482,26 +365,10 @@ public final class HBaseProtos { public Builder clear() { super.clear(); - name_ = com.google.protobuf.ByteString.EMPTY; + namespace_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); - if (attributesBuilder_ == null) { - attributes_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - attributesBuilder_.clear(); - } - if (columnFamiliesBuilder_ == null) { - columnFamilies_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - } else { - columnFamiliesBuilder_.clear(); - } - if (configurationBuilder_ == null) { - configuration_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - } else { - configurationBuilder_.clear(); - } + qualifier_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -511,24 +378,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -536,160 +403,52 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema result = new 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.name_ = name_; - if (attributesBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - attributes_ = java.util.Collections.unmodifiableList(attributes_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.attributes_ = attributes_; - } else { - result.attributes_ = attributesBuilder_.build(); + result.namespace_ = namespace_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; } - if (columnFamiliesBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004)) { - columnFamilies_ = java.util.Collections.unmodifiableList(columnFamilies_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.columnFamilies_ = columnFamilies_; + result.qualifier_ = qualifier_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName)other); } else { - result.columnFamilies_ = columnFamiliesBuilder_.build(); - } - if (configurationBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008)) { - configuration_ = java.util.Collections.unmodifiableList(configuration_); - bitField0_ = (bitField0_ & ~0x00000008); - } - result.configuration_ = configuration_; - } else { - result.configuration_ = configurationBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema)other); - } else { - super.mergeFrom(other); - return this; + super.mergeFrom(other); + return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) return this; - if (other.hasName()) { - setName(other.getName()); - } - if (attributesBuilder_ == null) { - if (!other.attributes_.isEmpty()) { - if (attributes_.isEmpty()) { - attributes_ = other.attributes_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureAttributesIsMutable(); - attributes_.addAll(other.attributes_); - } - onChanged(); - } - } else { - if (!other.attributes_.isEmpty()) { - if (attributesBuilder_.isEmpty()) { - attributesBuilder_.dispose(); - attributesBuilder_ = null; - attributes_ = other.attributes_; - bitField0_ = (bitField0_ & ~0x00000002); - attributesBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getAttributesFieldBuilder() : null; - } else { - attributesBuilder_.addAllMessages(other.attributes_); - } - } - } - if (columnFamiliesBuilder_ == null) { - if (!other.columnFamilies_.isEmpty()) { - if (columnFamilies_.isEmpty()) { - columnFamilies_ = other.columnFamilies_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureColumnFamiliesIsMutable(); - columnFamilies_.addAll(other.columnFamilies_); - } - onChanged(); - } - } else { - if (!other.columnFamilies_.isEmpty()) { - if (columnFamiliesBuilder_.isEmpty()) { - columnFamiliesBuilder_.dispose(); - columnFamiliesBuilder_ = null; - columnFamilies_ = other.columnFamilies_; - bitField0_ = (bitField0_ & ~0x00000004); - columnFamiliesBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getColumnFamiliesFieldBuilder() : null; - } else { - columnFamiliesBuilder_.addAllMessages(other.columnFamilies_); - } - } + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) return this; + if (other.hasNamespace()) { + setNamespace(other.getNamespace()); } - if (configurationBuilder_ == null) { - if (!other.configuration_.isEmpty()) { - if (configuration_.isEmpty()) { - configuration_ = other.configuration_; - bitField0_ = (bitField0_ & ~0x00000008); - } else { - ensureConfigurationIsMutable(); - configuration_.addAll(other.configuration_); - } - onChanged(); - } - } else { - if (!other.configuration_.isEmpty()) { - if (configurationBuilder_.isEmpty()) { - configurationBuilder_.dispose(); - configurationBuilder_ = null; - configuration_ = other.configuration_; - bitField0_ = (bitField0_ & ~0x00000008); - configurationBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getConfigurationFieldBuilder() : null; - } else { - configurationBuilder_.addAllMessages(other.configuration_); - } - } + if (other.hasQualifier()) { + setQualifier(other.getQualifier()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - for (int i = 0; i < getAttributesCount(); i++) { - if (!getAttributes(i).isInitialized()) { - - return false; - } - } - for (int i = 0; i < getColumnFamiliesCount(); i++) { - if (!getColumnFamilies(i).isInitialized()) { - - return false; - } + if (!hasNamespace()) { + + return false; } - for (int i = 0; i < getConfigurationCount(); i++) { - if (!getConfiguration(i).isInitialized()) { - - return false; - } + if (!hasQualifier()) { + + return false; } return true; } @@ -719,25 +478,12 @@ public final class HBaseProtos { } case 10: { bitField0_ |= 0x00000001; - name_ = input.readBytes(); + namespace_ = input.readBytes(); break; } case 18: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addAttributes(subBuilder.buildPartial()); - break; - } - case 26: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addColumnFamilies(subBuilder.buildPartial()); - break; - } - case 34: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addConfiguration(subBuilder.buildPartial()); + bitField0_ |= 0x00000002; + qualifier_ = input.readBytes(); break; } } @@ -746,409 +492,2181 @@ public final class HBaseProtos { private int bitField0_; - // optional bytes name = 1; - private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasName() { + // required bytes namespace = 1; + private com.google.protobuf.ByteString namespace_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasNamespace() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getName() { - return name_; + public com.google.protobuf.ByteString getNamespace() { + return namespace_; } - public Builder setName(com.google.protobuf.ByteString value) { + public Builder setNamespace(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - name_ = value; + namespace_ = value; onChanged(); return this; } - public Builder clearName() { + public Builder clearNamespace() { bitField0_ = (bitField0_ & ~0x00000001); - name_ = getDefaultInstance().getName(); + namespace_ = getDefaultInstance().getNamespace(); onChanged(); return this; } - // repeated .BytesBytesPair attributes = 2; - private java.util.List attributes_ = - java.util.Collections.emptyList(); - private void ensureAttributesIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - attributes_ = new java.util.ArrayList(attributes_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> attributesBuilder_; - - public java.util.List getAttributesList() { - if (attributesBuilder_ == null) { - return java.util.Collections.unmodifiableList(attributes_); - } else { - return attributesBuilder_.getMessageList(); - } - } - public int getAttributesCount() { - if (attributesBuilder_ == null) { - return attributes_.size(); - } else { - return attributesBuilder_.getCount(); - } - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index) { - if (attributesBuilder_ == null) { - return attributes_.get(index); - } else { - return attributesBuilder_.getMessage(index); - } - } - public Builder setAttributes( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { - if (attributesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAttributesIsMutable(); - attributes_.set(index, value); - onChanged(); - } else { - attributesBuilder_.setMessage(index, value); - } - return this; - } - public Builder setAttributes( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { - if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - attributes_.set(index, builderForValue.build()); - onChanged(); - } else { - attributesBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAttributes(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { - if (attributesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAttributesIsMutable(); - attributes_.add(value); - onChanged(); - } else { - attributesBuilder_.addMessage(value); - } - return this; + // required bytes qualifier = 2; + private com.google.protobuf.ByteString qualifier_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasQualifier() { + return ((bitField0_ & 0x00000002) == 0x00000002); } - public Builder addAttributes( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { - if (attributesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAttributesIsMutable(); - attributes_.add(index, value); - onChanged(); - } else { - attributesBuilder_.addMessage(index, value); - } - return this; + public com.google.protobuf.ByteString getQualifier() { + return qualifier_; } - public Builder addAttributes( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { - if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - attributes_.add(builderForValue.build()); - onChanged(); - } else { - attributesBuilder_.addMessage(builderForValue.build()); - } + public Builder setQualifier(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + qualifier_ = value; + onChanged(); return this; } - public Builder addAttributes( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { - if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - attributes_.add(index, builderForValue.build()); - onChanged(); - } else { - attributesBuilder_.addMessage(index, builderForValue.build()); - } + public Builder clearQualifier() { + bitField0_ = (bitField0_ & ~0x00000002); + qualifier_ = getDefaultInstance().getQualifier(); + onChanged(); return this; } - 
public Builder addAllAttributes( + + // @@protoc_insertion_point(builder_scope:TableName) + } + + static { + defaultInstance = new TableName(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:TableName) + } + + public interface TableSchemaOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .TableName table_name = 1; + boolean hasTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + // repeated .BytesBytesPair attributes = 2; + java.util.List + getAttributesList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index); + int getAttributesCount(); + java.util.List + getAttributesOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( + int index); + + // repeated .ColumnFamilySchema column_families = 3; + java.util.List + getColumnFamiliesList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnFamilies(int index); + int getColumnFamiliesCount(); + java.util.List + getColumnFamiliesOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnFamiliesOrBuilder( + int index); + + // repeated .NameStringPair configuration = 4; + java.util.List + getConfigurationList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index); + int getConfigurationCount(); + java.util.List + getConfigurationOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( + int index); + } + public static final class TableSchema extends + com.google.protobuf.GeneratedMessage + implements TableSchemaOrBuilder { + // Use TableSchema.newBuilder() to construct. 
+ private TableSchema(Builder builder) { + super(builder); + } + private TableSchema(boolean noInit) {} + + private static final TableSchema defaultInstance; + public static TableSchema getDefaultInstance() { + return defaultInstance; + } + + public TableSchema getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_fieldAccessorTable; + } + + private int bitField0_; + // optional .TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // repeated .BytesBytesPair attributes = 2; + public static final int ATTRIBUTES_FIELD_NUMBER = 2; + private java.util.List attributes_; + public java.util.List getAttributesList() { + return attributes_; + } + public java.util.List + getAttributesOrBuilderList() { + return attributes_; + } + public int getAttributesCount() { + return attributes_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index) { + return attributes_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( + int index) { + return attributes_.get(index); + } + + // repeated .ColumnFamilySchema column_families = 3; + public static final int COLUMN_FAMILIES_FIELD_NUMBER = 3; + private java.util.List columnFamilies_; + public java.util.List getColumnFamiliesList() { + return columnFamilies_; + } + public java.util.List + getColumnFamiliesOrBuilderList() { + return columnFamilies_; + } + public int getColumnFamiliesCount() { + return columnFamilies_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnFamilies(int index) { + return columnFamilies_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnFamiliesOrBuilder( + int index) { + return columnFamilies_.get(index); + } + + // repeated .NameStringPair configuration = 4; + public static final int CONFIGURATION_FIELD_NUMBER = 4; + private java.util.List configuration_; + public java.util.List getConfigurationList() { + return configuration_; + } + public java.util.List + getConfigurationOrBuilderList() { + return configuration_; + } + public int getConfigurationCount() { + return configuration_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { + return configuration_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( + int index) { + return configuration_.get(index); + } + + private void initFields() { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + 
attributes_ = java.util.Collections.emptyList(); + columnFamilies_ = java.util.Collections.emptyList(); + configuration_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasTableName()) { + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getAttributesCount(); i++) { + if (!getAttributes(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getColumnFamiliesCount(); i++) { + if (!getColumnFamilies(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getConfigurationCount(); i++) { + if (!getConfiguration(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableName_); + } + for (int i = 0; i < attributes_.size(); i++) { + output.writeMessage(2, attributes_.get(i)); + } + for (int i = 0; i < columnFamilies_.size(); i++) { + output.writeMessage(3, columnFamilies_.get(i)); + } + for (int i = 0; i < configuration_.size(); i++) { + output.writeMessage(4, configuration_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_); + } + for (int i = 0; i < attributes_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, attributes_.get(i)); + } + for (int i = 0; i < columnFamilies_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, columnFamilies_.get(i)); + } + for (int i = 0; i < configuration_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, configuration_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && getAttributesList() + .equals(other.getAttributesList()); + result = result && getColumnFamiliesList() + .equals(other.getColumnFamiliesList()); + result = result && getConfigurationList() + .equals(other.getConfigurationList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + 
public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (getAttributesCount() > 0) { + hash = (37 * hash) + ATTRIBUTES_FIELD_NUMBER; + hash = (53 * hash) + getAttributesList().hashCode(); + } + if (getColumnFamiliesCount() > 0) { + hash = (37 * hash) + COLUMN_FAMILIES_FIELD_NUMBER; + hash = (53 * hash) + getColumnFamiliesList().hashCode(); + } + if (getConfigurationCount() > 0) { + hash = (37 * hash) + CONFIGURATION_FIELD_NUMBER; + hash = (53 * hash) + getConfigurationList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableSchema_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + getAttributesFieldBuilder(); + getColumnFamiliesFieldBuilder(); + getConfigurationFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (attributesBuilder_ == null) { + attributes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + attributesBuilder_.clear(); + } + if (columnFamiliesBuilder_ == null) { + columnFamilies_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + columnFamiliesBuilder_.clear(); + } + if (configurationBuilder_ == null) { + configuration_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + configurationBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (attributesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + attributes_ = java.util.Collections.unmodifiableList(attributes_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.attributes_ = attributes_; + } else { + result.attributes_ = attributesBuilder_.build(); + } + if (columnFamiliesBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + columnFamilies_ = java.util.Collections.unmodifiableList(columnFamilies_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.columnFamilies_ = columnFamilies_; + } else { + result.columnFamilies_ = columnFamiliesBuilder_.build(); + } + if (configurationBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + configuration_ = java.util.Collections.unmodifiableList(configuration_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.configuration_ = configuration_; + } else { + result.configuration_ = configurationBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) return this; + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (attributesBuilder_ == null) { + if (!other.attributes_.isEmpty()) { + if (attributes_.isEmpty()) { + attributes_ = other.attributes_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureAttributesIsMutable(); + attributes_.addAll(other.attributes_); + } + onChanged(); + } + } else { + if (!other.attributes_.isEmpty()) { + if (attributesBuilder_.isEmpty()) { + attributesBuilder_.dispose(); + attributesBuilder_ = null; + attributes_ = other.attributes_; + bitField0_ = (bitField0_ & ~0x00000002); + attributesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getAttributesFieldBuilder() : null; + } else { + attributesBuilder_.addAllMessages(other.attributes_); + } + } + } + if (columnFamiliesBuilder_ == null) { + if (!other.columnFamilies_.isEmpty()) { + if (columnFamilies_.isEmpty()) { + columnFamilies_ = other.columnFamilies_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureColumnFamiliesIsMutable(); + columnFamilies_.addAll(other.columnFamilies_); + } + onChanged(); + } + } else { + if (!other.columnFamilies_.isEmpty()) { + if (columnFamiliesBuilder_.isEmpty()) { + columnFamiliesBuilder_.dispose(); + columnFamiliesBuilder_ = null; + columnFamilies_ = other.columnFamilies_; + bitField0_ = (bitField0_ & ~0x00000004); + columnFamiliesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getColumnFamiliesFieldBuilder() : null; + } else { + columnFamiliesBuilder_.addAllMessages(other.columnFamilies_); + } + } + } + if (configurationBuilder_ == null) { + if (!other.configuration_.isEmpty()) { + if (configuration_.isEmpty()) { + configuration_ = other.configuration_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureConfigurationIsMutable(); + configuration_.addAll(other.configuration_); + } + onChanged(); + } + } else { + if (!other.configuration_.isEmpty()) { + if (configurationBuilder_.isEmpty()) { + configurationBuilder_.dispose(); + configurationBuilder_ = null; + configuration_ = other.configuration_; + bitField0_ = (bitField0_ & ~0x00000008); + configurationBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getConfigurationFieldBuilder() : null; + } else { + configurationBuilder_.addAllMessages(other.configuration_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasTableName()) { + if (!getTableName().isInitialized()) { + + return false; + } + } + for (int i = 0; i < getAttributesCount(); i++) { + if (!getAttributes(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getColumnFamiliesCount(); i++) { + if (!getColumnFamilies(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getConfigurationCount(); i++) { + if (!getConfiguration(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + if (hasTableName()) { + subBuilder.mergeFrom(getTableName()); + } + input.readMessage(subBuilder, extensionRegistry); + setTableName(subBuilder.buildPartial()); + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + 
addAttributes(subBuilder.buildPartial()); + break; + } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addColumnFamilies(subBuilder.buildPartial()); + break; + } + case 34: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addConfiguration(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // optional .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // repeated .BytesBytesPair attributes = 2; + private java.util.List attributes_ = + java.util.Collections.emptyList(); + private void ensureAttributesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + attributes_ = new java.util.ArrayList(attributes_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> attributesBuilder_; + + public java.util.List getAttributesList() { + if (attributesBuilder_ == null) { + return java.util.Collections.unmodifiableList(attributes_); + } else { + return attributesBuilder_.getMessageList(); + } + } + public int getAttributesCount() { + if (attributesBuilder_ == null) { + return attributes_.size(); + } else { + return attributesBuilder_.getCount(); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index) { + if (attributesBuilder_ == null) { + return attributes_.get(index); + } else { + return attributesBuilder_.getMessage(index); + } + } + public Builder setAttributes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { + if (attributesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAttributesIsMutable(); + attributes_.set(index, value); + onChanged(); + } else { + attributesBuilder_.setMessage(index, value); + } + return this; + } + public Builder setAttributes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { + if (attributesBuilder_ == null) { + ensureAttributesIsMutable(); + attributes_.set(index, builderForValue.build()); + onChanged(); + } else { + attributesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAttributes(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { + if (attributesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAttributesIsMutable(); + attributes_.add(value); + onChanged(); + } else { + attributesBuilder_.addMessage(value); + } + return this; + } + public Builder addAttributes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { + if (attributesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAttributesIsMutable(); + attributes_.add(index, value); + onChanged(); + } else { + attributesBuilder_.addMessage(index, value); + } + return this; + } + public Builder addAttributes( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { + if 
(attributesBuilder_ == null) { + ensureAttributesIsMutable(); + attributes_.add(builderForValue.build()); + onChanged(); + } else { + attributesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addAttributes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { + if (attributesBuilder_ == null) { + ensureAttributesIsMutable(); + attributes_.add(index, builderForValue.build()); + onChanged(); + } else { + attributesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllAttributes( java.lang.Iterable values) { if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - super.addAll(values, attributes_); - onChanged(); + ensureAttributesIsMutable(); + super.addAll(values, attributes_); + onChanged(); + } else { + attributesBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearAttributes() { + if (attributesBuilder_ == null) { + attributes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + attributesBuilder_.clear(); + } + return this; + } + public Builder removeAttributes(int index) { + if (attributesBuilder_ == null) { + ensureAttributesIsMutable(); + attributes_.remove(index); + onChanged(); + } else { + attributesBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder getAttributesBuilder( + int index) { + return getAttributesFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( + int index) { + if (attributesBuilder_ == null) { + return attributes_.get(index); } else { + return attributesBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getAttributesOrBuilderList() { + if (attributesBuilder_ != null) { + return attributesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(attributes_); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addAttributesBuilder() { + return getAttributesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addAttributesBuilder( + int index) { + return getAttributesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()); + } + public java.util.List + getAttributesBuilderList() { + return getAttributesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> + getAttributesFieldBuilder() { + if (attributesBuilder_ == null) { + attributesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder>( + attributes_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + attributes_ = null; + } + return attributesBuilder_; + } + 
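// Illustrative sketch, not part of this patch: how client code typically drives the
// generated TableSchema builder API shown above (addAttributes / addColumnFamilies /
// build). The setFirst/setSecond setters on BytesBytesPair are an assumption here,
// since that message's fields are not shown in this hunk.
import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class TableSchemaBuilderSketch {
  public static HBaseProtos.TableSchema example() {
    // A column family schema only requires its name (required bytes name = 1).
    HBaseProtos.ColumnFamilySchema family = HBaseProtos.ColumnFamilySchema.newBuilder()
        .setName(ByteString.copyFromUtf8("info"))
        .build();

    // Table-level attributes travel as BytesBytesPair key/value entries.
    HBaseProtos.BytesBytesPair attr = HBaseProtos.BytesBytesPair.newBuilder()
        .setFirst(ByteString.copyFromUtf8("MAX_FILESIZE"))   // assumed setter names
        .setSecond(ByteString.copyFromUtf8("1073741824"))
        .build();

    return HBaseProtos.TableSchema.newBuilder()
        .addColumnFamilies(family)   // repeated .ColumnFamilySchema column_families = 3
        .addAttributes(attr)         // repeated .BytesBytesPair attributes = 2
        .build();
  }
}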
+ // repeated .ColumnFamilySchema column_families = 3; + private java.util.List columnFamilies_ = + java.util.Collections.emptyList(); + private void ensureColumnFamiliesIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + columnFamilies_ = new java.util.ArrayList(columnFamilies_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> columnFamiliesBuilder_; + + public java.util.List getColumnFamiliesList() { + if (columnFamiliesBuilder_ == null) { + return java.util.Collections.unmodifiableList(columnFamilies_); + } else { + return columnFamiliesBuilder_.getMessageList(); + } + } + public int getColumnFamiliesCount() { + if (columnFamiliesBuilder_ == null) { + return columnFamilies_.size(); + } else { + return columnFamiliesBuilder_.getCount(); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnFamilies(int index) { + if (columnFamiliesBuilder_ == null) { + return columnFamilies_.get(index); + } else { + return columnFamiliesBuilder_.getMessage(index); + } + } + public Builder setColumnFamilies( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) { + if (columnFamiliesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnFamiliesIsMutable(); + columnFamilies_.set(index, value); + onChanged(); + } else { + columnFamiliesBuilder_.setMessage(index, value); + } + return this; + } + public Builder setColumnFamilies( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder builderForValue) { + if (columnFamiliesBuilder_ == null) { + ensureColumnFamiliesIsMutable(); + columnFamilies_.set(index, builderForValue.build()); + onChanged(); + } else { + columnFamiliesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addColumnFamilies(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) { + if (columnFamiliesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnFamiliesIsMutable(); + columnFamilies_.add(value); + onChanged(); + } else { + columnFamiliesBuilder_.addMessage(value); + } + return this; + } + public Builder addColumnFamilies( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) { + if (columnFamiliesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureColumnFamiliesIsMutable(); + columnFamilies_.add(index, value); + onChanged(); + } else { + columnFamiliesBuilder_.addMessage(index, value); + } + return this; + } + public Builder addColumnFamilies( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder builderForValue) { + if (columnFamiliesBuilder_ == null) { + ensureColumnFamiliesIsMutable(); + columnFamilies_.add(builderForValue.build()); + onChanged(); + } else { + columnFamiliesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addColumnFamilies( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder builderForValue) { + if (columnFamiliesBuilder_ == null) { + ensureColumnFamiliesIsMutable(); + columnFamilies_.add(index, 
builderForValue.build()); + onChanged(); + } else { + columnFamiliesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllColumnFamilies( + java.lang.Iterable values) { + if (columnFamiliesBuilder_ == null) { + ensureColumnFamiliesIsMutable(); + super.addAll(values, columnFamilies_); + onChanged(); + } else { + columnFamiliesBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearColumnFamilies() { + if (columnFamiliesBuilder_ == null) { + columnFamilies_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + columnFamiliesBuilder_.clear(); + } + return this; + } + public Builder removeColumnFamilies(int index) { + if (columnFamiliesBuilder_ == null) { + ensureColumnFamiliesIsMutable(); + columnFamilies_.remove(index); + onChanged(); + } else { + columnFamiliesBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder getColumnFamiliesBuilder( + int index) { + return getColumnFamiliesFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnFamiliesOrBuilder( + int index) { + if (columnFamiliesBuilder_ == null) { + return columnFamilies_.get(index); } else { + return columnFamiliesBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getColumnFamiliesOrBuilderList() { + if (columnFamiliesBuilder_ != null) { + return columnFamiliesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(columnFamilies_); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder addColumnFamiliesBuilder() { + return getColumnFamiliesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance()); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder addColumnFamiliesBuilder( + int index) { + return getColumnFamiliesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance()); + } + public java.util.List + getColumnFamiliesBuilderList() { + return getColumnFamiliesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> + getColumnFamiliesFieldBuilder() { + if (columnFamiliesBuilder_ == null) { + columnFamiliesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>( + columnFamilies_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + columnFamilies_ = null; + } + return columnFamiliesBuilder_; + } + + // repeated .NameStringPair configuration = 4; + private java.util.List configuration_ = + java.util.Collections.emptyList(); + private void ensureConfigurationIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + configuration_ = new java.util.ArrayList(configuration_); + bitField0_ |= 0x00000008; + } + } + + 
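// Illustrative sketch, not part of this patch: round-tripping a TableSchema through the
// static parse helpers declared earlier in this class (parseFrom / parseDelimitedFrom).
// writeDelimitedTo and toByteArray come from protobuf's MessageLite base class and are
// assumptions here insofar as they are not shown in this hunk.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;

public class TableSchemaRoundTripSketch {
  public static void roundTrip(TableSchema schema) throws IOException {
    // Plain byte[] round trip.
    byte[] bytes = schema.toByteArray();
    TableSchema copy = TableSchema.parseFrom(bytes);
    assert copy.equals(schema);

    // Length-delimited framing lets several messages share one stream;
    // parseDelimitedFrom returns null once the stream is exhausted.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    schema.writeDelimitedTo(out);
    schema.writeDelimitedTo(out);
    ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
    TableSchema next;
    while ((next = TableSchema.parseDelimitedFrom(in)) != null) {
      assert next.equals(schema);
    }
  }
}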
private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> configurationBuilder_; + + public java.util.List getConfigurationList() { + if (configurationBuilder_ == null) { + return java.util.Collections.unmodifiableList(configuration_); + } else { + return configurationBuilder_.getMessageList(); + } + } + public int getConfigurationCount() { + if (configurationBuilder_ == null) { + return configuration_.size(); + } else { + return configurationBuilder_.getCount(); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { + if (configurationBuilder_ == null) { + return configuration_.get(index); + } else { + return configurationBuilder_.getMessage(index); + } + } + public Builder setConfiguration( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { + if (configurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConfigurationIsMutable(); + configuration_.set(index, value); + onChanged(); + } else { + configurationBuilder_.setMessage(index, value); + } + return this; + } + public Builder setConfiguration( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + configuration_.set(index, builderForValue.build()); + onChanged(); + } else { + configurationBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addConfiguration(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { + if (configurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConfigurationIsMutable(); + configuration_.add(value); + onChanged(); + } else { + configurationBuilder_.addMessage(value); + } + return this; + } + public Builder addConfiguration( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { + if (configurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConfigurationIsMutable(); + configuration_.add(index, value); + onChanged(); + } else { + configurationBuilder_.addMessage(index, value); + } + return this; + } + public Builder addConfiguration( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + configuration_.add(builderForValue.build()); + onChanged(); + } else { + configurationBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addConfiguration( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + configuration_.add(index, builderForValue.build()); + onChanged(); + } else { + configurationBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllConfiguration( + java.lang.Iterable values) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + super.addAll(values, configuration_); + onChanged(); + } else { + configurationBuilder_.addAllMessages(values); + } + return this; + } + public 
Builder clearConfiguration() { + if (configurationBuilder_ == null) { + configuration_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + configurationBuilder_.clear(); + } + return this; + } + public Builder removeConfiguration(int index) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + configuration_.remove(index); + onChanged(); + } else { + configurationBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder getConfigurationBuilder( + int index) { + return getConfigurationFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( + int index) { + if (configurationBuilder_ == null) { + return configuration_.get(index); } else { + return configurationBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getConfigurationOrBuilderList() { + if (configurationBuilder_ != null) { + return configurationBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(configuration_); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addConfigurationBuilder() { + return getConfigurationFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addConfigurationBuilder( + int index) { + return getConfigurationFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()); + } + public java.util.List + getConfigurationBuilderList() { + return getConfigurationFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> + getConfigurationFieldBuilder() { + if (configurationBuilder_ == null) { + configurationBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>( + configuration_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + configuration_ = null; + } + return configurationBuilder_; + } + + // @@protoc_insertion_point(builder_scope:TableSchema) + } + + static { + defaultInstance = new TableSchema(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:TableSchema) + } + + public interface ColumnFamilySchemaOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes name = 1; + boolean hasName(); + com.google.protobuf.ByteString getName(); + + // repeated .BytesBytesPair attributes = 2; + java.util.List + getAttributesList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index); + int getAttributesCount(); + java.util.List + getAttributesOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( + int index); + + // repeated .NameStringPair configuration = 3; + 
java.util.List + getConfigurationList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index); + int getConfigurationCount(); + java.util.List + getConfigurationOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( + int index); + } + public static final class ColumnFamilySchema extends + com.google.protobuf.GeneratedMessage + implements ColumnFamilySchemaOrBuilder { + // Use ColumnFamilySchema.newBuilder() to construct. + private ColumnFamilySchema(Builder builder) { + super(builder); + } + private ColumnFamilySchema(boolean noInit) {} + + private static final ColumnFamilySchema defaultInstance; + public static ColumnFamilySchema getDefaultInstance() { + return defaultInstance; + } + + public ColumnFamilySchema getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_fieldAccessorTable; + } + + private int bitField0_; + // required bytes name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString name_; + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public com.google.protobuf.ByteString getName() { + return name_; + } + + // repeated .BytesBytesPair attributes = 2; + public static final int ATTRIBUTES_FIELD_NUMBER = 2; + private java.util.List attributes_; + public java.util.List getAttributesList() { + return attributes_; + } + public java.util.List + getAttributesOrBuilderList() { + return attributes_; + } + public int getAttributesCount() { + return attributes_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index) { + return attributes_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( + int index) { + return attributes_.get(index); + } + + // repeated .NameStringPair configuration = 3; + public static final int CONFIGURATION_FIELD_NUMBER = 3; + private java.util.List configuration_; + public java.util.List getConfigurationList() { + return configuration_; + } + public java.util.List + getConfigurationOrBuilderList() { + return configuration_; + } + public int getConfigurationCount() { + return configuration_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { + return configuration_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( + int index) { + return configuration_.get(index); + } + + private void initFields() { + name_ = com.google.protobuf.ByteString.EMPTY; + attributes_ = java.util.Collections.emptyList(); + configuration_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getAttributesCount(); i++) { + if 
(!getAttributes(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getConfigurationCount(); i++) { + if (!getConfiguration(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, name_); + } + for (int i = 0; i < attributes_.size(); i++) { + output.writeMessage(2, attributes_.get(i)); + } + for (int i = 0; i < configuration_.size(); i++) { + output.writeMessage(3, configuration_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, name_); + } + for (int i = 0; i < attributes_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, attributes_.get(i)); + } + for (int i = 0; i < configuration_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, configuration_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema) obj; + + boolean result = true; + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); + } + result = result && getAttributesList() + .equals(other.getAttributesList()); + result = result && getConfigurationList() + .equals(other.getConfigurationList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + } + if (getAttributesCount() > 0) { + hash = (37 * hash) + ATTRIBUTES_FIELD_NUMBER; + hash = (53 * hash) + getAttributesList().hashCode(); + } + if (getConfigurationCount() > 0) { + hash = (37 * hash) + CONFIGURATION_FIELD_NUMBER; + hash = (53 * hash) + getConfigurationList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getAttributesFieldBuilder(); + getConfigurationFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + if (attributesBuilder_ == null) { + attributes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); } else { - attributesBuilder_.addAllMessages(values); + attributesBuilder_.clear(); + } + if (configurationBuilder_ == null) { + configuration_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + configurationBuilder_.clear(); } return this; } - public Builder clearAttributes() { + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; if (attributesBuilder_ == null) { - attributes_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + attributes_ = java.util.Collections.unmodifiableList(attributes_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.attributes_ = attributes_; + } else { + result.attributes_ = attributesBuilder_.build(); + } + if (configurationBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + configuration_ = java.util.Collections.unmodifiableList(configuration_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.configuration_ = configuration_; + } 
else { + result.configuration_ = configurationBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema)other); } else { - attributesBuilder_.clear(); + super.mergeFrom(other); + return this; } - return this; } - public Builder removeAttributes(int index) { + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); + } if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - attributes_.remove(index); - onChanged(); + if (!other.attributes_.isEmpty()) { + if (attributes_.isEmpty()) { + attributes_ = other.attributes_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureAttributesIsMutable(); + attributes_.addAll(other.attributes_); + } + onChanged(); + } } else { - attributesBuilder_.remove(index); + if (!other.attributes_.isEmpty()) { + if (attributesBuilder_.isEmpty()) { + attributesBuilder_.dispose(); + attributesBuilder_ = null; + attributes_ = other.attributes_; + bitField0_ = (bitField0_ & ~0x00000002); + attributesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getAttributesFieldBuilder() : null; + } else { + attributesBuilder_.addAllMessages(other.attributes_); + } + } + } + if (configurationBuilder_ == null) { + if (!other.configuration_.isEmpty()) { + if (configuration_.isEmpty()) { + configuration_ = other.configuration_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureConfigurationIsMutable(); + configuration_.addAll(other.configuration_); + } + onChanged(); + } + } else { + if (!other.configuration_.isEmpty()) { + if (configurationBuilder_.isEmpty()) { + configurationBuilder_.dispose(); + configurationBuilder_ = null; + configuration_ = other.configuration_; + bitField0_ = (bitField0_ & ~0x00000004); + configurationBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getConfigurationFieldBuilder() : null; + } else { + configurationBuilder_.addAllMessages(other.configuration_); + } + } } + this.mergeUnknownFields(other.getUnknownFields()); return this; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder getAttributesBuilder( - int index) { - return getAttributesFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( - int index) { - if (attributesBuilder_ == null) { - return attributes_.get(index); } else { - return attributesBuilder_.getMessageOrBuilder(index); + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + for (int i = 0; i < getAttributesCount(); i++) { + if (!getAttributes(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getConfigurationCount(); i++) { + if (!getConfiguration(i).isInitialized()) { + + return false; + } } + return true; } - public java.util.List - getAttributesOrBuilderList() { - if (attributesBuilder_ != null) { - return attributesBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(attributes_); + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addAttributes(subBuilder.buildPartial()); + break; + } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addConfiguration(subBuilder.buildPartial()); + break; + } + } } } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addAttributesBuilder() { - return getAttributesFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()); + + private int bitField0_; + + // required bytes name = 1; + private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addAttributesBuilder( - int index) { - return getAttributesFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()); + public com.google.protobuf.ByteString getName() { + return name_; } - public java.util.List - getAttributesBuilderList() { - return getAttributesFieldBuilder().getBuilderList(); + public Builder setName(com.google.protobuf.ByteString value) { + if 
(value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> - getAttributesFieldBuilder() { - if (attributesBuilder_ == null) { - attributesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder>( - attributes_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - attributes_ = null; - } - return attributesBuilder_; + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; } - // repeated .ColumnFamilySchema column_families = 3; - private java.util.List columnFamilies_ = + // repeated .BytesBytesPair attributes = 2; + private java.util.List attributes_ = java.util.Collections.emptyList(); - private void ensureColumnFamiliesIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - columnFamilies_ = new java.util.ArrayList(columnFamilies_); - bitField0_ |= 0x00000004; + private void ensureAttributesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + attributes_ = new java.util.ArrayList(attributes_); + bitField0_ |= 0x00000002; } } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> columnFamiliesBuilder_; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> attributesBuilder_; - public java.util.List getColumnFamiliesList() { - if (columnFamiliesBuilder_ == null) { - return java.util.Collections.unmodifiableList(columnFamilies_); + public java.util.List getAttributesList() { + if (attributesBuilder_ == null) { + return java.util.Collections.unmodifiableList(attributes_); } else { - return columnFamiliesBuilder_.getMessageList(); + return attributesBuilder_.getMessageList(); } } - public int getColumnFamiliesCount() { - if (columnFamiliesBuilder_ == null) { - return columnFamilies_.size(); + public int getAttributesCount() { + if (attributesBuilder_ == null) { + return attributes_.size(); } else { - return columnFamiliesBuilder_.getCount(); + return attributesBuilder_.getCount(); } } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnFamilies(int index) { - if (columnFamiliesBuilder_ == null) { - return columnFamilies_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index) { + if (attributesBuilder_ == null) { + return attributes_.get(index); } else { - return columnFamiliesBuilder_.getMessage(index); + return attributesBuilder_.getMessage(index); } } - public Builder setColumnFamilies( - int index, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) { - if (columnFamiliesBuilder_ == null) { + public Builder setAttributes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { + if (attributesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureColumnFamiliesIsMutable(); - columnFamilies_.set(index, value); + ensureAttributesIsMutable(); + attributes_.set(index, value); onChanged(); } else { - columnFamiliesBuilder_.setMessage(index, value); + attributesBuilder_.setMessage(index, value); } return this; } - public Builder setColumnFamilies( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder builderForValue) { - if (columnFamiliesBuilder_ == null) { - ensureColumnFamiliesIsMutable(); - columnFamilies_.set(index, builderForValue.build()); + public Builder setAttributes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { + if (attributesBuilder_ == null) { + ensureAttributesIsMutable(); + attributes_.set(index, builderForValue.build()); onChanged(); } else { - columnFamiliesBuilder_.setMessage(index, builderForValue.build()); + attributesBuilder_.setMessage(index, builderForValue.build()); } return this; } - public Builder addColumnFamilies(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) { - if (columnFamiliesBuilder_ == null) { + public Builder addAttributes(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { + if (attributesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureColumnFamiliesIsMutable(); - columnFamilies_.add(value); + ensureAttributesIsMutable(); + attributes_.add(value); onChanged(); } else { - columnFamiliesBuilder_.addMessage(value); + attributesBuilder_.addMessage(value); } return this; } - public Builder addColumnFamilies( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) { - if (columnFamiliesBuilder_ == null) { + public Builder addAttributes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { + if (attributesBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureColumnFamiliesIsMutable(); - columnFamilies_.add(index, value); + ensureAttributesIsMutable(); + attributes_.add(index, value); onChanged(); } else { - columnFamiliesBuilder_.addMessage(index, value); + attributesBuilder_.addMessage(index, value); } return this; } - public Builder addColumnFamilies( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder builderForValue) { - if (columnFamiliesBuilder_ == null) { - ensureColumnFamiliesIsMutable(); - columnFamilies_.add(builderForValue.build()); + public Builder addAttributes( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { + if (attributesBuilder_ == null) { + ensureAttributesIsMutable(); + attributes_.add(builderForValue.build()); onChanged(); } else { - columnFamiliesBuilder_.addMessage(builderForValue.build()); + attributesBuilder_.addMessage(builderForValue.build()); } return this; } - public Builder addColumnFamilies( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder builderForValue) { - if (columnFamiliesBuilder_ == null) { - ensureColumnFamiliesIsMutable(); - columnFamilies_.add(index, builderForValue.build()); + public Builder 
addAttributes( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { + if (attributesBuilder_ == null) { + ensureAttributesIsMutable(); + attributes_.add(index, builderForValue.build()); onChanged(); } else { - columnFamiliesBuilder_.addMessage(index, builderForValue.build()); + attributesBuilder_.addMessage(index, builderForValue.build()); } return this; } - public Builder addAllColumnFamilies( - java.lang.Iterable values) { - if (columnFamiliesBuilder_ == null) { - ensureColumnFamiliesIsMutable(); - super.addAll(values, columnFamilies_); + public Builder addAllAttributes( + java.lang.Iterable values) { + if (attributesBuilder_ == null) { + ensureAttributesIsMutable(); + super.addAll(values, attributes_); onChanged(); } else { - columnFamiliesBuilder_.addAllMessages(values); + attributesBuilder_.addAllMessages(values); } return this; } - public Builder clearColumnFamilies() { - if (columnFamiliesBuilder_ == null) { - columnFamilies_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); + public Builder clearAttributes() { + if (attributesBuilder_ == null) { + attributes_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); onChanged(); } else { - columnFamiliesBuilder_.clear(); + attributesBuilder_.clear(); } return this; } - public Builder removeColumnFamilies(int index) { - if (columnFamiliesBuilder_ == null) { - ensureColumnFamiliesIsMutable(); - columnFamilies_.remove(index); + public Builder removeAttributes(int index) { + if (attributesBuilder_ == null) { + ensureAttributesIsMutable(); + attributes_.remove(index); onChanged(); } else { - columnFamiliesBuilder_.remove(index); + attributesBuilder_.remove(index); } return this; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder getColumnFamiliesBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder getAttributesBuilder( int index) { - return getColumnFamiliesFieldBuilder().getBuilder(index); + return getAttributesFieldBuilder().getBuilder(index); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnFamiliesOrBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( int index) { - if (columnFamiliesBuilder_ == null) { - return columnFamilies_.get(index); } else { - return columnFamiliesBuilder_.getMessageOrBuilder(index); + if (attributesBuilder_ == null) { + return attributes_.get(index); } else { + return attributesBuilder_.getMessageOrBuilder(index); } } - public java.util.List - getColumnFamiliesOrBuilderList() { - if (columnFamiliesBuilder_ != null) { - return columnFamiliesBuilder_.getMessageOrBuilderList(); + public java.util.List + getAttributesOrBuilderList() { + if (attributesBuilder_ != null) { + return attributesBuilder_.getMessageOrBuilderList(); } else { - return java.util.Collections.unmodifiableList(columnFamilies_); + return java.util.Collections.unmodifiableList(attributes_); } } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder addColumnFamiliesBuilder() { - return getColumnFamiliesFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance()); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addAttributesBuilder() { + return getAttributesFieldBuilder().addBuilder( + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder addColumnFamiliesBuilder( + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addAttributesBuilder( int index) { - return getColumnFamiliesFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance()); + return getAttributesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()); } - public java.util.List - getColumnFamiliesBuilderList() { - return getColumnFamiliesFieldBuilder().getBuilderList(); + public java.util.List + getAttributesBuilderList() { + return getAttributesFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> - getColumnFamiliesFieldBuilder() { - if (columnFamiliesBuilder_ == null) { - columnFamiliesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>( - columnFamilies_, - ((bitField0_ & 0x00000004) == 0x00000004), + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> + getAttributesFieldBuilder() { + if (attributesBuilder_ == null) { + attributesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder>( + attributes_, + ((bitField0_ & 0x00000002) == 0x00000002), getParentForChildren(), isClean()); - columnFamilies_ = null; + attributes_ = null; } - return columnFamiliesBuilder_; + return attributesBuilder_; } - // repeated .NameStringPair configuration = 4; + // repeated .NameStringPair configuration = 3; private java.util.List configuration_ = java.util.Collections.emptyList(); private void ensureConfigurationIsMutable() { - if (!((bitField0_ & 0x00000008) == 0x00000008)) { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { configuration_ = new java.util.ArrayList(configuration_); - bitField0_ |= 0x00000008; + bitField0_ |= 0x00000004; } } @@ -1264,7 +2782,7 @@ public final class HBaseProtos { public Builder clearConfiguration() { if (configurationBuilder_ == null) { configuration_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); + bitField0_ = (bitField0_ & ~0x00000004); onChanged(); } else { configurationBuilder_.clear(); @@ -1320,7 +2838,7 @@ public final class HBaseProtos { configurationBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>( configuration_, - ((bitField0_ & 0x00000008) == 0x00000008), + ((bitField0_ & 0x00000004) == 0x00000004), getParentForChildren(), isClean()); configuration_ = null; @@ -1328,150 +2846,161 @@ public final class HBaseProtos { return configurationBuilder_; } - // @@protoc_insertion_point(builder_scope:TableSchema) + // @@protoc_insertion_point(builder_scope:ColumnFamilySchema) } static { - defaultInstance = new TableSchema(true); + defaultInstance = new ColumnFamilySchema(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:TableSchema) + // @@protoc_insertion_point(class_scope:ColumnFamilySchema) } - public interface ColumnFamilySchemaOrBuilder + public interface RegionInfoOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bytes name = 1; - boolean hasName(); - com.google.protobuf.ByteString getName(); + // required uint64 region_id = 1; + boolean hasRegionId(); + long getRegionId(); - // repeated .BytesBytesPair attributes = 2; - java.util.List - getAttributesList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index); - int getAttributesCount(); - java.util.List - getAttributesOrBuilderList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( - int index); + // required .TableName table_name = 2; + boolean hasTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); - // repeated .NameStringPair configuration = 3; - java.util.List - getConfigurationList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index); - int getConfigurationCount(); - java.util.List - getConfigurationOrBuilderList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( - int index); + // optional bytes start_key = 3; + boolean hasStartKey(); + com.google.protobuf.ByteString getStartKey(); + + // optional bytes end_key = 4; + boolean hasEndKey(); + com.google.protobuf.ByteString getEndKey(); + + // optional bool offline = 5; + boolean hasOffline(); + boolean getOffline(); + + // optional bool split = 6; + boolean hasSplit(); + boolean getSplit(); } - public static final class ColumnFamilySchema extends + public static final class RegionInfo extends com.google.protobuf.GeneratedMessage - implements ColumnFamilySchemaOrBuilder { - // Use ColumnFamilySchema.newBuilder() to construct. - private ColumnFamilySchema(Builder builder) { + implements RegionInfoOrBuilder { + // Use RegionInfo.newBuilder() to construct. 
+ private RegionInfo(Builder builder) { super(builder); } - private ColumnFamilySchema(boolean noInit) {} + private RegionInfo(boolean noInit) {} - private static final ColumnFamilySchema defaultInstance; - public static ColumnFamilySchema getDefaultInstance() { + private static final RegionInfo defaultInstance; + public static RegionInfo getDefaultInstance() { return defaultInstance; } - public ColumnFamilySchema getDefaultInstanceForType() { + public RegionInfo getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionInfo_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionInfo_fieldAccessorTable; } private int bitField0_; - // required bytes name = 1; - public static final int NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString name_; - public boolean hasName() { + // required uint64 region_id = 1; + public static final int REGION_ID_FIELD_NUMBER = 1; + private long regionId_; + public boolean hasRegionId() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getName() { - return name_; + public long getRegionId() { + return regionId_; } - // repeated .BytesBytesPair attributes = 2; - public static final int ATTRIBUTES_FIELD_NUMBER = 2; - private java.util.List attributes_; - public java.util.List getAttributesList() { - return attributes_; + // required .TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); } - public java.util.List - getAttributesOrBuilderList() { - return attributes_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; } - public int getAttributesCount() { - return attributes_.size(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index) { - return attributes_.get(index); + + // optional bytes start_key = 3; + public static final int START_KEY_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString startKey_; + public boolean hasStartKey() { + return ((bitField0_ & 0x00000004) == 0x00000004); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( - int index) { - return attributes_.get(index); + public com.google.protobuf.ByteString getStartKey() { + return startKey_; } - // repeated .NameStringPair configuration = 3; - public static final int CONFIGURATION_FIELD_NUMBER = 3; - private java.util.List configuration_; - public java.util.List getConfigurationList() { - return configuration_; + // optional bytes end_key = 4; + public static final int END_KEY_FIELD_NUMBER = 4; + private com.google.protobuf.ByteString endKey_; + public boolean hasEndKey() { + return ((bitField0_ & 0x00000008) 
== 0x00000008); } - public java.util.List - getConfigurationOrBuilderList() { - return configuration_; + public com.google.protobuf.ByteString getEndKey() { + return endKey_; } - public int getConfigurationCount() { - return configuration_.size(); + + // optional bool offline = 5; + public static final int OFFLINE_FIELD_NUMBER = 5; + private boolean offline_; + public boolean hasOffline() { + return ((bitField0_ & 0x00000010) == 0x00000010); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { - return configuration_.get(index); + public boolean getOffline() { + return offline_; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( - int index) { - return configuration_.get(index); + + // optional bool split = 6; + public static final int SPLIT_FIELD_NUMBER = 6; + private boolean split_; + public boolean hasSplit() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + public boolean getSplit() { + return split_; } private void initFields() { - name_ = com.google.protobuf.ByteString.EMPTY; - attributes_ = java.util.Collections.emptyList(); - configuration_ = java.util.Collections.emptyList(); + regionId_ = 0L; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + startKey_ = com.google.protobuf.ByteString.EMPTY; + endKey_ = com.google.protobuf.ByteString.EMPTY; + offline_ = false; + split_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasName()) { + if (!hasRegionId()) { memoizedIsInitialized = 0; return false; } - for (int i = 0; i < getAttributesCount(); i++) { - if (!getAttributes(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; } - for (int i = 0; i < getConfigurationCount(); i++) { - if (!getConfiguration(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; } memoizedIsInitialized = 1; return true; @@ -1481,13 +3010,22 @@ public final class HBaseProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, name_); + output.writeUInt64(1, regionId_); } - for (int i = 0; i < attributes_.size(); i++) { - output.writeMessage(2, attributes_.get(i)); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, tableName_); } - for (int i = 0; i < configuration_.size(); i++) { - output.writeMessage(3, configuration_.get(i)); + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, startKey_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, endKey_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBool(5, offline_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBool(6, split_); } getUnknownFields().writeTo(output); } @@ -1500,15 +3038,27 @@ public final class HBaseProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, name_); + .computeUInt64Size(1, regionId_); } - for (int i = 0; i < attributes_.size(); i++) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, 
attributes_.get(i)); + .computeMessageSize(2, tableName_); } - for (int i = 0; i < configuration_.size(); i++) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, configuration_.get(i)); + .computeBytesSize(3, startKey_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, endKey_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(5, offline_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(6, split_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -1527,21 +3077,42 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo) obj; boolean result = true; - result = result && (hasName() == other.hasName()); - if (hasName()) { - result = result && getName() - .equals(other.getName()); + result = result && (hasRegionId() == other.hasRegionId()); + if (hasRegionId()) { + result = result && (getRegionId() + == other.getRegionId()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasStartKey() == other.hasStartKey()); + if (hasStartKey()) { + result = result && getStartKey() + .equals(other.getStartKey()); + } + result = result && (hasEndKey() == other.hasEndKey()); + if (hasEndKey()) { + result = result && getEndKey() + .equals(other.getEndKey()); + } + result = result && (hasOffline() == other.hasOffline()); + if (hasOffline()) { + result = result && (getOffline() + == other.getOffline()); + } + result = result && (hasSplit() == other.hasSplit()); + if (hasSplit()) { + result = result && (getSplit() + == other.getSplit()); } - result = result && getAttributesList() - .equals(other.getAttributesList()); - result = result && getConfigurationList() - .equals(other.getConfigurationList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -1551,57 +3122,69 @@ public final class HBaseProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasName()) { - hash = (37 * hash) + NAME_FIELD_NUMBER; - hash = (53 * hash) + getName().hashCode(); + if (hasRegionId()) { + hash = (37 * hash) + REGION_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getRegionId()); } - if (getAttributesCount() > 0) { - hash = (37 * hash) + ATTRIBUTES_FIELD_NUMBER; - hash = (53 * hash) + getAttributesList().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); } - if (getConfigurationCount() > 0) { - hash = (37 * hash) + CONFIGURATION_FIELD_NUMBER; - hash = (53 * hash) + getConfigurationList().hashCode(); + if (hasStartKey()) { + hash = (37 * hash) + 
START_KEY_FIELD_NUMBER; + hash = (53 * hash) + getStartKey().hashCode(); + } + if (hasEndKey()) { + hash = (37 * hash) + END_KEY_FIELD_NUMBER; + hash = (53 * hash) + getEndKey().hashCode(); + } + if (hasOffline()) { + hash = (37 * hash) + OFFLINE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getOffline()); + } + if (hasSplit()) { + hash = (37 * hash) + SPLIT_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getSplit()); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -1610,7 +3193,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -1621,12 +3204,12 @@ public final class HBaseProtos { return null; } } - 
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -1636,7 +3219,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -1649,18 +3232,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionInfo_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ColumnFamilySchema_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionInfo_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -1671,8 +3254,7 @@ public final class HBaseProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getAttributesFieldBuilder(); - getConfigurationFieldBuilder(); + getTableNameFieldBuilder(); } } private static Builder create() { @@ -1681,20 +3263,22 @@ public final class HBaseProtos { public Builder clear() { super.clear(); - name_ = com.google.protobuf.ByteString.EMPTY; + regionId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); - if (attributesBuilder_ == null) { - attributes_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - attributesBuilder_.clear(); - } - if (configurationBuilder_ == null) { - configuration_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); } else { - configurationBuilder_.clear(); + tableNameBuilder_.clear(); } + bitField0_ = (bitField0_ & ~0x00000002); + startKey_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & 
~0x00000004); + endKey_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + offline_ = false; + bitField0_ = (bitField0_ & ~0x00000010); + split_ = false; + bitField0_ = (bitField0_ & ~0x00000020); return this; } @@ -1704,24 +3288,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -1729,123 +3313,88 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.name_ = name_; - if (attributesBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - attributes_ = java.util.Collections.unmodifiableList(attributes_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.attributes_ = attributes_; + result.regionId_ = regionId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; } else { - result.attributes_ = attributesBuilder_.build(); + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.startKey_ = startKey_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.endKey_ = 
endKey_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; } - if (configurationBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004)) { - configuration_ = java.util.Collections.unmodifiableList(configuration_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.configuration_ = configuration_; - } else { - result.configuration_ = configurationBuilder_.build(); + result.offline_ = offline_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; } + result.split_ = split_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance()) return this; - if (other.hasName()) { - setName(other.getName()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) return this; + if (other.hasRegionId()) { + setRegionId(other.getRegionId()); } - if (attributesBuilder_ == null) { - if (!other.attributes_.isEmpty()) { - if (attributes_.isEmpty()) { - attributes_ = other.attributes_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureAttributesIsMutable(); - attributes_.addAll(other.attributes_); - } - onChanged(); - } - } else { - if (!other.attributes_.isEmpty()) { - if (attributesBuilder_.isEmpty()) { - attributesBuilder_.dispose(); - attributesBuilder_ = null; - attributes_ = other.attributes_; - bitField0_ = (bitField0_ & ~0x00000002); - attributesBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getAttributesFieldBuilder() : null; - } else { - attributesBuilder_.addAllMessages(other.attributes_); - } - } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); } - if (configurationBuilder_ == null) { - if (!other.configuration_.isEmpty()) { - if (configuration_.isEmpty()) { - configuration_ = other.configuration_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureConfigurationIsMutable(); - configuration_.addAll(other.configuration_); - } - onChanged(); - } - } else { - if (!other.configuration_.isEmpty()) { - if (configurationBuilder_.isEmpty()) { - configurationBuilder_.dispose(); - configurationBuilder_ = null; - configuration_ = other.configuration_; - bitField0_ = (bitField0_ & ~0x00000004); - configurationBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getConfigurationFieldBuilder() : null; - } else { - configurationBuilder_.addAllMessages(other.configuration_); - } - } + if (other.hasStartKey()) { + setStartKey(other.getStartKey()); + } + if (other.hasEndKey()) { + setEndKey(other.getEndKey()); + } + if (other.hasOffline()) { + setOffline(other.getOffline()); + } + if (other.hasSplit()) { + setSplit(other.getSplit()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasName()) { + if (!hasRegionId()) { return false; } - for (int i = 0; i < getAttributesCount(); i++) { - if (!getAttributes(i).isInitialized()) { - - return false; - } + if (!hasTableName()) { + + return false; } - for (int i = 0; i < getConfigurationCount(); i++) { - if (!getConfiguration(i).isInitialized()) { - - return false; - } + if (!getTableName().isInitialized()) { + + return false; } return true; } @@ -1873,572 +3422,333 @@ public final class HBaseProtos { } break; } - case 10: { + case 8: { bitField0_ |= 0x00000001; - name_ = input.readBytes(); + regionId_ = input.readUInt64(); break; } case 18: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.newBuilder(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + if (hasTableName()) { + subBuilder.mergeFrom(getTableName()); + } input.readMessage(subBuilder, extensionRegistry); - addAttributes(subBuilder.buildPartial()); + setTableName(subBuilder.buildPartial()); break; } case 26: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addConfiguration(subBuilder.buildPartial()); + bitField0_ |= 0x00000004; + startKey_ = input.readBytes(); + break; + } + case 34: { + bitField0_ |= 0x00000008; + endKey_ = input.readBytes(); + break; + } + case 40: { + bitField0_ |= 0x00000010; + offline_ = input.readBool(); + break; + } + case 48: { + bitField0_ |= 0x00000020; + split_ = input.readBool(); break; } } } } - - private int bitField0_; - - // required bytes name = 1; - private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public com.google.protobuf.ByteString getName() { - return name_; - } - public Builder setName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - name_ = value; - onChanged(); - return this; - } - public Builder clearName() { - bitField0_ = (bitField0_ & ~0x00000001); - name_ = getDefaultInstance().getName(); - onChanged(); - return this; - } - - // repeated .BytesBytesPair attributes = 2; - private java.util.List attributes_ = - java.util.Collections.emptyList(); - private void ensureAttributesIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - attributes_ = new java.util.ArrayList(attributes_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> 
attributesBuilder_; - - public java.util.List getAttributesList() { - if (attributesBuilder_ == null) { - return java.util.Collections.unmodifiableList(attributes_); - } else { - return attributesBuilder_.getMessageList(); - } - } - public int getAttributesCount() { - if (attributesBuilder_ == null) { - return attributes_.size(); - } else { - return attributesBuilder_.getCount(); - } - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getAttributes(int index) { - if (attributesBuilder_ == null) { - return attributes_.get(index); - } else { - return attributesBuilder_.getMessage(index); - } - } - public Builder setAttributes( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { - if (attributesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAttributesIsMutable(); - attributes_.set(index, value); - onChanged(); - } else { - attributesBuilder_.setMessage(index, value); - } - return this; - } - public Builder setAttributes( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { - if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - attributes_.set(index, builderForValue.build()); - onChanged(); - } else { - attributesBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAttributes(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { - if (attributesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAttributesIsMutable(); - attributes_.add(value); - onChanged(); - } else { - attributesBuilder_.addMessage(value); - } - return this; - } - public Builder addAttributes( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair value) { - if (attributesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureAttributesIsMutable(); - attributes_.add(index, value); - onChanged(); - } else { - attributesBuilder_.addMessage(index, value); - } - return this; - } - public Builder addAttributes( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { - if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - attributes_.add(builderForValue.build()); - onChanged(); - } else { - attributesBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addAttributes( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder builderForValue) { - if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - attributes_.add(index, builderForValue.build()); - onChanged(); - } else { - attributesBuilder_.addMessage(index, builderForValue.build()); - } - return this; + + private int bitField0_; + + // required uint64 region_id = 1; + private long regionId_ ; + public boolean hasRegionId() { + return ((bitField0_ & 0x00000001) == 0x00000001); } - public Builder addAllAttributes( - java.lang.Iterable values) { - if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - super.addAll(values, attributes_); - onChanged(); - } else { - attributesBuilder_.addAllMessages(values); - } - return this; + public long getRegionId() { + return regionId_; } - public Builder clearAttributes() { - if (attributesBuilder_ == null) { - attributes_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - 
attributesBuilder_.clear(); - } + public Builder setRegionId(long value) { + bitField0_ |= 0x00000001; + regionId_ = value; + onChanged(); return this; } - public Builder removeAttributes(int index) { - if (attributesBuilder_ == null) { - ensureAttributesIsMutable(); - attributes_.remove(index); - onChanged(); - } else { - attributesBuilder_.remove(index); - } + public Builder clearRegionId() { + bitField0_ = (bitField0_ & ~0x00000001); + regionId_ = 0L; + onChanged(); return this; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder getAttributesBuilder( - int index) { - return getAttributesFieldBuilder().getBuilder(index); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder getAttributesOrBuilder( - int index) { - if (attributesBuilder_ == null) { - return attributes_.get(index); } else { - return attributesBuilder_.getMessageOrBuilder(index); - } - } - public java.util.List - getAttributesOrBuilderList() { - if (attributesBuilder_ != null) { - return attributesBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(attributes_); - } - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addAttributesBuilder() { - return getAttributesFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()); - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder addAttributesBuilder( - int index) { - return getAttributesFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()); - } - public java.util.List - getAttributesBuilderList() { - return getAttributesFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder> - getAttributesFieldBuilder() { - if (attributesBuilder_ == null) { - attributesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder>( - attributes_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - attributes_ = null; - } - return attributesBuilder_; - } - - // repeated .NameStringPair configuration = 3; - private java.util.List configuration_ = - java.util.Collections.emptyList(); - private void ensureConfigurationIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - configuration_ = new java.util.ArrayList(configuration_); - bitField0_ |= 0x00000004; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> configurationBuilder_; - public java.util.List getConfigurationList() { - if (configurationBuilder_ == null) { - return java.util.Collections.unmodifiableList(configuration_); - } else { - return configurationBuilder_.getMessageList(); - } - } - public int getConfigurationCount() { - if 
(configurationBuilder_ == null) { - return configuration_.size(); - } else { - return configurationBuilder_.getCount(); - } + // required .TableName table_name = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { - if (configurationBuilder_ == null) { - return configuration_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; } else { - return configurationBuilder_.getMessage(index); + return tableNameBuilder_.getMessage(); } } - public Builder setConfiguration( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { - if (configurationBuilder_ == null) { + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { if (value == null) { throw new NullPointerException(); } - ensureConfigurationIsMutable(); - configuration_.set(index, value); + tableName_ = value; onChanged(); } else { - configurationBuilder_.setMessage(index, value); + tableNameBuilder_.setMessage(value); } + bitField0_ |= 0x00000002; return this; } - public Builder setConfiguration( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { - if (configurationBuilder_ == null) { - ensureConfigurationIsMutable(); - configuration_.set(index, builderForValue.build()); + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); onChanged(); } else { - configurationBuilder_.setMessage(index, builderForValue.build()); + tableNameBuilder_.setMessage(builderForValue.build()); } + bitField0_ |= 0x00000002; return this; } - public Builder addConfiguration(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { - if (configurationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; } - ensureConfigurationIsMutable(); - configuration_.add(value); onChanged(); } else { - configurationBuilder_.addMessage(value); + tableNameBuilder_.mergeFrom(value); } + bitField0_ |= 0x00000002; return this; } - public Builder addConfiguration( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { - if (configurationBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - 
ensureConfigurationIsMutable(); - configuration_.add(index, value); + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); onChanged(); } else { - configurationBuilder_.addMessage(index, value); + tableNameBuilder_.clear(); } + bitField0_ = (bitField0_ & ~0x00000002); return this; } - public Builder addConfiguration( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { - if (configurationBuilder_ == null) { - ensureConfigurationIsMutable(); - configuration_.add(builderForValue.build()); - onChanged(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); } else { - configurationBuilder_.addMessage(builderForValue.build()); + return tableName_; } - return this; } - public Builder addConfiguration( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { - if (configurationBuilder_ == null) { - ensureConfigurationIsMutable(); - configuration_.add(index, builderForValue.build()); - onChanged(); - } else { - configurationBuilder_.addMessage(index, builderForValue.build()); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; } + return tableNameBuilder_; + } + + // optional bytes start_key = 3; + private com.google.protobuf.ByteString startKey_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasStartKey() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public com.google.protobuf.ByteString getStartKey() { + return startKey_; + } + public Builder setStartKey(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + startKey_ = value; + onChanged(); return this; } - public Builder addAllConfiguration( - java.lang.Iterable values) { - if (configurationBuilder_ == null) { - ensureConfigurationIsMutable(); - super.addAll(values, configuration_); - onChanged(); - } else { - configurationBuilder_.addAllMessages(values); - } + public Builder clearStartKey() { + bitField0_ = (bitField0_ & ~0x00000004); + startKey_ = getDefaultInstance().getStartKey(); + onChanged(); return this; } - public Builder clearConfiguration() { - if (configurationBuilder_ == null) { - configuration_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - } else { - configurationBuilder_.clear(); - } + + // optional bytes end_key = 4; + private com.google.protobuf.ByteString endKey_ = 
com.google.protobuf.ByteString.EMPTY; + public boolean hasEndKey() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public com.google.protobuf.ByteString getEndKey() { + return endKey_; + } + public Builder setEndKey(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + endKey_ = value; + onChanged(); return this; } - public Builder removeConfiguration(int index) { - if (configurationBuilder_ == null) { - ensureConfigurationIsMutable(); - configuration_.remove(index); - onChanged(); - } else { - configurationBuilder_.remove(index); - } + public Builder clearEndKey() { + bitField0_ = (bitField0_ & ~0x00000008); + endKey_ = getDefaultInstance().getEndKey(); + onChanged(); return this; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder getConfigurationBuilder( - int index) { - return getConfigurationFieldBuilder().getBuilder(index); + + // optional bool offline = 5; + private boolean offline_ ; + public boolean hasOffline() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public boolean getOffline() { + return offline_; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( - int index) { - if (configurationBuilder_ == null) { - return configuration_.get(index); } else { - return configurationBuilder_.getMessageOrBuilder(index); - } + public Builder setOffline(boolean value) { + bitField0_ |= 0x00000010; + offline_ = value; + onChanged(); + return this; } - public java.util.List - getConfigurationOrBuilderList() { - if (configurationBuilder_ != null) { - return configurationBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(configuration_); - } + public Builder clearOffline() { + bitField0_ = (bitField0_ & ~0x00000010); + offline_ = false; + onChanged(); + return this; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addConfigurationBuilder() { - return getConfigurationFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()); + + // optional bool split = 6; + private boolean split_ ; + public boolean hasSplit() { + return ((bitField0_ & 0x00000020) == 0x00000020); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addConfigurationBuilder( - int index) { - return getConfigurationFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()); + public boolean getSplit() { + return split_; } - public java.util.List - getConfigurationBuilderList() { - return getConfigurationFieldBuilder().getBuilderList(); + public Builder setSplit(boolean value) { + bitField0_ |= 0x00000020; + split_ = value; + onChanged(); + return this; } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> - getConfigurationFieldBuilder() { - if (configurationBuilder_ == null) { - configurationBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>( - configuration_, - ((bitField0_ & 0x00000004) == 0x00000004), - getParentForChildren(), - isClean()); - configuration_ = null; - } - return configurationBuilder_; + public Builder clearSplit() { + bitField0_ = (bitField0_ & ~0x00000020); + split_ = false; + onChanged(); + return this; } - // @@protoc_insertion_point(builder_scope:ColumnFamilySchema) + // @@protoc_insertion_point(builder_scope:RegionInfo) } static { - defaultInstance = new ColumnFamilySchema(true); + defaultInstance = new RegionInfo(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:ColumnFamilySchema) + // @@protoc_insertion_point(class_scope:RegionInfo) } - public interface RegionInfoOrBuilder + public interface FavoredNodesOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required uint64 region_id = 1; - boolean hasRegionId(); - long getRegionId(); - - // required bytes table_name = 2; - boolean hasTableName(); - com.google.protobuf.ByteString getTableName(); - - // optional bytes start_key = 3; - boolean hasStartKey(); - com.google.protobuf.ByteString getStartKey(); - - // optional bytes end_key = 4; - boolean hasEndKey(); - com.google.protobuf.ByteString getEndKey(); - - // optional bool offline = 5; - boolean hasOffline(); - boolean getOffline(); - - // optional bool split = 6; - boolean hasSplit(); - boolean getSplit(); + // repeated .ServerName favored_node = 1; + java.util.List + getFavoredNodeList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index); + int getFavoredNodeCount(); + java.util.List + getFavoredNodeOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder( + int index); } - public static final class RegionInfo extends + public static final class FavoredNodes extends com.google.protobuf.GeneratedMessage - implements RegionInfoOrBuilder { - // Use RegionInfo.newBuilder() to construct. - private RegionInfo(Builder builder) { + implements FavoredNodesOrBuilder { + // Use FavoredNodes.newBuilder() to construct. 
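
The hunk above swaps out the old ColumnFamilySchema builder plumbing for the builder of the reworked RegionInfo message: region_id stays a required uint64, but table_name (field 2) becomes a nested .TableName message with its own SingleFieldBuilder, followed by the optional start_key, end_key, offline and split fields. A rough, hedged sketch of what a caller of the regenerated class looks like (not part of this patch; the namespace/qualifier setters on TableName are assumed here and do not appear in this hunk):

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class RegionInfoProtoSketch {
      public static HBaseProtos.RegionInfo example() {
        // table_name is now a message, built with its own builder instead of raw bytes.
        HBaseProtos.TableName tableName = HBaseProtos.TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))    // assumed TableName field
            .setQualifier(ByteString.copyFromUtf8("testtable"))  // assumed TableName field
            .build();
        return HBaseProtos.RegionInfo.newBuilder()
            .setRegionId(System.currentTimeMillis())  // required uint64 region_id = 1
            .setTableName(tableName)                  // required .TableName table_name = 2
            .setStartKey(ByteString.EMPTY)            // optional bytes start_key = 3
            .setEndKey(ByteString.EMPTY)              // optional bytes end_key = 4
            .setOffline(false)                        // optional bool offline = 5
            .setSplit(false)                          // optional bool split = 6
            .build();
      }
    }

Because region_id and table_name are both required, build() throws an UninitializedMessageException if either setter is skipped, which is why mergeTableName/clearTableName also toggle the 0x00000002 bit in the generated code above.
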
+ private FavoredNodes(Builder builder) { super(builder); } - private RegionInfo(boolean noInit) {} + private FavoredNodes(boolean noInit) {} - private static final RegionInfo defaultInstance; - public static RegionInfo getDefaultInstance() { + private static final FavoredNodes defaultInstance; + public static FavoredNodes getDefaultInstance() { return defaultInstance; } - public RegionInfo getDefaultInstanceForType() { + public FavoredNodes getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionInfo_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_FavoredNodes_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionInfo_fieldAccessorTable; - } - - private int bitField0_; - // required uint64 region_id = 1; - public static final int REGION_ID_FIELD_NUMBER = 1; - private long regionId_; - public boolean hasRegionId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getRegionId() { - return regionId_; - } - - // required bytes table_name = 2; - public static final int TABLE_NAME_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString tableName_; - public boolean hasTableName() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public com.google.protobuf.ByteString getTableName() { - return tableName_; - } - - // optional bytes start_key = 3; - public static final int START_KEY_FIELD_NUMBER = 3; - private com.google.protobuf.ByteString startKey_; - public boolean hasStartKey() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public com.google.protobuf.ByteString getStartKey() { - return startKey_; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_FavoredNodes_fieldAccessorTable; } - // optional bytes end_key = 4; - public static final int END_KEY_FIELD_NUMBER = 4; - private com.google.protobuf.ByteString endKey_; - public boolean hasEndKey() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - public com.google.protobuf.ByteString getEndKey() { - return endKey_; + // repeated .ServerName favored_node = 1; + public static final int FAVORED_NODE_FIELD_NUMBER = 1; + private java.util.List favoredNode_; + public java.util.List getFavoredNodeList() { + return favoredNode_; } - - // optional bool offline = 5; - public static final int OFFLINE_FIELD_NUMBER = 5; - private boolean offline_; - public boolean hasOffline() { - return ((bitField0_ & 0x00000010) == 0x00000010); + public java.util.List + getFavoredNodeOrBuilderList() { + return favoredNode_; } - public boolean getOffline() { - return offline_; + public int getFavoredNodeCount() { + return favoredNode_.size(); } - - // optional bool split = 6; - public static final int SPLIT_FIELD_NUMBER = 6; - private boolean split_; - public boolean hasSplit() { - return ((bitField0_ & 0x00000020) == 0x00000020); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index) { + return favoredNode_.get(index); } - public boolean getSplit() { - return split_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder( + int index) { + return favoredNode_.get(index); } private void initFields() { - regionId_ = 0L; - tableName_ = 
com.google.protobuf.ByteString.EMPTY; - startKey_ = com.google.protobuf.ByteString.EMPTY; - endKey_ = com.google.protobuf.ByteString.EMPTY; - offline_ = false; - split_ = false; + favoredNode_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasRegionId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasTableName()) { - memoizedIsInitialized = 0; - return false; + for (int i = 0; i < getFavoredNodeCount(); i++) { + if (!getFavoredNode(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } } memoizedIsInitialized = 1; return true; @@ -2447,56 +3757,21 @@ public final class HBaseProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, regionId_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, tableName_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, startKey_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeBytes(4, endKey_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeBool(5, offline_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeBool(6, split_); + for (int i = 0; i < favoredNode_.size(); i++) { + output.writeMessage(1, favoredNode_.get(i)); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, regionId_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, tableName_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, startKey_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(4, endKey_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(5, offline_); - } - if (((bitField0_ & 0x00000020) == 0x00000020)) { + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < favoredNode_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(6, split_); + .computeMessageSize(1, favoredNode_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -2515,42 +3790,14 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes) obj; boolean result = true; - result = result && (hasRegionId() == other.hasRegionId()); - if 
(hasRegionId()) { - result = result && (getRegionId() - == other.getRegionId()); - } - result = result && (hasTableName() == other.hasTableName()); - if (hasTableName()) { - result = result && getTableName() - .equals(other.getTableName()); - } - result = result && (hasStartKey() == other.hasStartKey()); - if (hasStartKey()) { - result = result && getStartKey() - .equals(other.getStartKey()); - } - result = result && (hasEndKey() == other.hasEndKey()); - if (hasEndKey()) { - result = result && getEndKey() - .equals(other.getEndKey()); - } - result = result && (hasOffline() == other.hasOffline()); - if (hasOffline()) { - result = result && (getOffline() - == other.getOffline()); - } - result = result && (hasSplit() == other.hasSplit()); - if (hasSplit()) { - result = result && (getSplit() - == other.getSplit()); - } + result = result && getFavoredNodeList() + .equals(other.getFavoredNodeList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -2560,69 +3807,49 @@ public final class HBaseProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasRegionId()) { - hash = (37 * hash) + REGION_ID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getRegionId()); - } - if (hasTableName()) { - hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; - hash = (53 * hash) + getTableName().hashCode(); - } - if (hasStartKey()) { - hash = (37 * hash) + START_KEY_FIELD_NUMBER; - hash = (53 * hash) + getStartKey().hashCode(); - } - if (hasEndKey()) { - hash = (37 * hash) + END_KEY_FIELD_NUMBER; - hash = (53 * hash) + getEndKey().hashCode(); - } - if (hasOffline()) { - hash = (37 * hash) + OFFLINE_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getOffline()); - } - if (hasSplit()) { - hash = (37 * hash) + SPLIT_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getSplit()); + if (getFavoredNodeCount() > 0) { + hash = (37 * hash) + FAVORED_NODE_FIELD_NUMBER; + hash = (53 * hash) + getFavoredNodeList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } 
- public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -2631,7 +3858,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -2642,12 +3869,12 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -2657,7 +3884,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -2670,18 +3897,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodesOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionInfo_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_FavoredNodes_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionInfo_fieldAccessorTable; + return 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_FavoredNodes_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -2692,6 +3919,7 @@ public final class HBaseProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getFavoredNodeFieldBuilder(); } } private static Builder create() { @@ -2700,18 +3928,12 @@ public final class HBaseProtos { public Builder clear() { super.clear(); - regionId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - tableName_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - startKey_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000004); - endKey_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000008); - offline_ = false; - bitField0_ = (bitField0_ & ~0x00000010); - split_ = false; - bitField0_ = (bitField0_ & ~0x00000020); + if (favoredNodeBuilder_ == null) { + favoredNode_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + favoredNodeBuilder_.clear(); + } return this; } @@ -2721,24 +3943,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -2746,80 +3968,69 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes buildPartial() { + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes(this); int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.regionId_ = regionId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.tableName_ = tableName_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.startKey_ = startKey_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.endKey_ = endKey_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.offline_ = offline_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; + if (favoredNodeBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + favoredNode_ = java.util.Collections.unmodifiableList(favoredNode_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.favoredNode_ = favoredNode_; + } else { + result.favoredNode_ = favoredNodeBuilder_.build(); } - result.split_ = split_; - result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) return this; - if (other.hasRegionId()) { - setRegionId(other.getRegionId()); - } - if (other.hasTableName()) { - setTableName(other.getTableName()); - } - if (other.hasStartKey()) { - setStartKey(other.getStartKey()); - } - if (other.hasEndKey()) { - setEndKey(other.getEndKey()); - } - if (other.hasOffline()) { - setOffline(other.getOffline()); - } - if (other.hasSplit()) { - setSplit(other.getSplit()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.getDefaultInstance()) return this; + if (favoredNodeBuilder_ == null) { + if (!other.favoredNode_.isEmpty()) { + if (favoredNode_.isEmpty()) { + favoredNode_ = other.favoredNode_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureFavoredNodeIsMutable(); + favoredNode_.addAll(other.favoredNode_); + } + onChanged(); + } + } else { + if (!other.favoredNode_.isEmpty()) { + if (favoredNodeBuilder_.isEmpty()) { + favoredNodeBuilder_.dispose(); + favoredNodeBuilder_ = null; + favoredNode_ = other.favoredNode_; + bitField0_ = (bitField0_ & ~0x00000001); + favoredNodeBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getFavoredNodeFieldBuilder() : null; + } else { + favoredNodeBuilder_.addAllMessages(other.favoredNode_); + } + } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasRegionId()) { - - return false; - } - if (!hasTableName()) { - - return false; + for (int i = 0; i < getFavoredNodeCount(); i++) { + if (!getFavoredNode(i).isInitialized()) { + + return false; + } } return true; } @@ -2847,34 +4058,10 @@ public final class HBaseProtos { } break; } - case 8: { - bitField0_ |= 0x00000001; - regionId_ = input.readUInt64(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - tableName_ = input.readBytes(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - startKey_ = input.readBytes(); - break; - } - case 34: { - bitField0_ |= 0x00000008; - endKey_ = input.readBytes(); - break; - } - case 40: { - bitField0_ |= 0x00000010; - offline_ = input.readBool(); - break; - } - case 48: { - bitField0_ |= 0x00000020; - split_ = input.readBool(); + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addFavoredNode(subBuilder.buildPartial()); break; } } @@ -2883,227 +4070,348 @@ public final class HBaseProtos { private int bitField0_; - // required uint64 region_id = 1; - private long regionId_ ; - public boolean hasRegionId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public long getRegionId() { - return regionId_; - } - public Builder setRegionId(long value) { - bitField0_ |= 0x00000001; - regionId_ = value; - onChanged(); - return this; - } - public Builder clearRegionId() { - bitField0_ = (bitField0_ & ~0x00000001); - regionId_ = 0L; - onChanged(); - return this; + // repeated .ServerName favored_node = 1; + private java.util.List favoredNode_ = + java.util.Collections.emptyList(); + private void ensureFavoredNodeIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + favoredNode_ = new java.util.ArrayList(favoredNode_); + bitField0_ |= 0x00000001; + } } - // required bytes table_name = 2; - private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasTableName() { - return ((bitField0_ & 0x00000002) == 0x00000002); + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> favoredNodeBuilder_; + + public java.util.List getFavoredNodeList() { + if (favoredNodeBuilder_ == null) { + return java.util.Collections.unmodifiableList(favoredNode_); + } else { + return favoredNodeBuilder_.getMessageList(); + } } - public com.google.protobuf.ByteString getTableName() { - return tableName_; + public int getFavoredNodeCount() { + if (favoredNodeBuilder_ == null) { + return favoredNode_.size(); + } else { + return favoredNodeBuilder_.getCount(); + } } - public Builder setTableName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - tableName_ = value; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index) { + if (favoredNodeBuilder_ == null) { + return favoredNode_.get(index); + } else { + return 
favoredNodeBuilder_.getMessage(index); + } } - public Builder clearTableName() { - bitField0_ = (bitField0_ & ~0x00000002); - tableName_ = getDefaultInstance().getTableName(); - onChanged(); + public Builder setFavoredNode( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (favoredNodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFavoredNodeIsMutable(); + favoredNode_.set(index, value); + onChanged(); + } else { + favoredNodeBuilder_.setMessage(index, value); + } return this; } - - // optional bytes start_key = 3; - private com.google.protobuf.ByteString startKey_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasStartKey() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public com.google.protobuf.ByteString getStartKey() { - return startKey_; + public Builder setFavoredNode( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (favoredNodeBuilder_ == null) { + ensureFavoredNodeIsMutable(); + favoredNode_.set(index, builderForValue.build()); + onChanged(); + } else { + favoredNodeBuilder_.setMessage(index, builderForValue.build()); + } + return this; } - public Builder setStartKey(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - startKey_ = value; - onChanged(); + public Builder addFavoredNode(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (favoredNodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFavoredNodeIsMutable(); + favoredNode_.add(value); + onChanged(); + } else { + favoredNodeBuilder_.addMessage(value); + } return this; } - public Builder clearStartKey() { - bitField0_ = (bitField0_ & ~0x00000004); - startKey_ = getDefaultInstance().getStartKey(); - onChanged(); + public Builder addFavoredNode( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (favoredNodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFavoredNodeIsMutable(); + favoredNode_.add(index, value); + onChanged(); + } else { + favoredNodeBuilder_.addMessage(index, value); + } return this; } - - // optional bytes end_key = 4; - private com.google.protobuf.ByteString endKey_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasEndKey() { - return ((bitField0_ & 0x00000008) == 0x00000008); + public Builder addFavoredNode( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (favoredNodeBuilder_ == null) { + ensureFavoredNodeIsMutable(); + favoredNode_.add(builderForValue.build()); + onChanged(); + } else { + favoredNodeBuilder_.addMessage(builderForValue.build()); + } + return this; } - public com.google.protobuf.ByteString getEndKey() { - return endKey_; + public Builder addFavoredNode( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (favoredNodeBuilder_ == null) { + ensureFavoredNodeIsMutable(); + favoredNode_.add(index, builderForValue.build()); + onChanged(); + } else { + favoredNodeBuilder_.addMessage(index, builderForValue.build()); + } + return this; } - public Builder setEndKey(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - endKey_ = value; - onChanged(); + public Builder addAllFavoredNode( + 
java.lang.Iterable values) { + if (favoredNodeBuilder_ == null) { + ensureFavoredNodeIsMutable(); + super.addAll(values, favoredNode_); + onChanged(); + } else { + favoredNodeBuilder_.addAllMessages(values); + } return this; } - public Builder clearEndKey() { - bitField0_ = (bitField0_ & ~0x00000008); - endKey_ = getDefaultInstance().getEndKey(); - onChanged(); + public Builder clearFavoredNode() { + if (favoredNodeBuilder_ == null) { + favoredNode_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + favoredNodeBuilder_.clear(); + } return this; } - - // optional bool offline = 5; - private boolean offline_ ; - public boolean hasOffline() { - return ((bitField0_ & 0x00000010) == 0x00000010); + public Builder removeFavoredNode(int index) { + if (favoredNodeBuilder_ == null) { + ensureFavoredNodeIsMutable(); + favoredNode_.remove(index); + onChanged(); + } else { + favoredNodeBuilder_.remove(index); + } + return this; } - public boolean getOffline() { - return offline_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getFavoredNodeBuilder( + int index) { + return getFavoredNodeFieldBuilder().getBuilder(index); } - public Builder setOffline(boolean value) { - bitField0_ |= 0x00000010; - offline_ = value; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder( + int index) { + if (favoredNodeBuilder_ == null) { + return favoredNode_.get(index); } else { + return favoredNodeBuilder_.getMessageOrBuilder(index); + } } - public Builder clearOffline() { - bitField0_ = (bitField0_ & ~0x00000010); - offline_ = false; - onChanged(); - return this; + public java.util.List + getFavoredNodeOrBuilderList() { + if (favoredNodeBuilder_ != null) { + return favoredNodeBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(favoredNode_); + } } - - // optional bool split = 6; - private boolean split_ ; - public boolean hasSplit() { - return ((bitField0_ & 0x00000020) == 0x00000020); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addFavoredNodeBuilder() { + return getFavoredNodeFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); } - public boolean getSplit() { - return split_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addFavoredNodeBuilder( + int index) { + return getFavoredNodeFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); } - public Builder setSplit(boolean value) { - bitField0_ |= 0x00000020; - split_ = value; - onChanged(); - return this; + public java.util.List + getFavoredNodeBuilderList() { + return getFavoredNodeFieldBuilder().getBuilderList(); } - public Builder clearSplit() { - bitField0_ = (bitField0_ & ~0x00000020); - split_ = false; - onChanged(); - return this; + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getFavoredNodeFieldBuilder() { + if (favoredNodeBuilder_ == null) { + favoredNodeBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + favoredNode_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + favoredNode_ = null; + } + return favoredNodeBuilder_; } - // @@protoc_insertion_point(builder_scope:RegionInfo) + // @@protoc_insertion_point(builder_scope:FavoredNodes) } static { - defaultInstance = new RegionInfo(true); + defaultInstance = new FavoredNodes(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:RegionInfo) + // @@protoc_insertion_point(class_scope:FavoredNodes) } - public interface FavoredNodesOrBuilder + public interface RegionSpecifierOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated .ServerName favored_node = 1; - java.util.List - getFavoredNodeList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index); - int getFavoredNodeCount(); - java.util.List - getFavoredNodeOrBuilderList(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder( - int index); + // required .RegionSpecifier.RegionSpecifierType type = 1; + boolean hasType(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType getType(); + + // required bytes value = 2; + boolean hasValue(); + com.google.protobuf.ByteString getValue(); } - public static final class FavoredNodes extends + public static final class RegionSpecifier extends com.google.protobuf.GeneratedMessage - implements FavoredNodesOrBuilder { - // Use FavoredNodes.newBuilder() to construct. - private FavoredNodes(Builder builder) { + implements RegionSpecifierOrBuilder { + // Use RegionSpecifier.newBuilder() to construct. 
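
FavoredNodes, whose generated class ends just above, wraps a single repeated .ServerName favored_node = 1 field; its builder exposes the usual repeated-field surface (addFavoredNode, addAllFavoredNode, removeFavoredNode, clearFavoredNode) backed by a RepeatedFieldBuilder. A hedged usage sketch (not part of this patch; the host-name/port setters on ServerName are assumptions, since that message is not shown in this hunk):

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class FavoredNodesProtoSketch {
      public static HBaseProtos.FavoredNodes example() {
        HBaseProtos.FavoredNodes.Builder builder = HBaseProtos.FavoredNodes.newBuilder();
        for (int port = 60020; port <= 60022; port++) {
          builder.addFavoredNode(HBaseProtos.ServerName.newBuilder()
              .setHostName("rs-" + port + ".example.org")  // assumed ServerName field
              .setPort(port)                               // assumed ServerName field
              .build());
        }
        // isInitialized() on the built message checks every element, mirroring the
        // generated loop over getFavoredNode(i).isInitialized() shown above.
        return builder.build();
      }
    }
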
+ private RegionSpecifier(Builder builder) { super(builder); } - private FavoredNodes(boolean noInit) {} + private RegionSpecifier(boolean noInit) {} - private static final FavoredNodes defaultInstance; - public static FavoredNodes getDefaultInstance() { + private static final RegionSpecifier defaultInstance; + public static RegionSpecifier getDefaultInstance() { return defaultInstance; } - public FavoredNodes getDefaultInstanceForType() { + public RegionSpecifier getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_FavoredNodes_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionSpecifier_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_FavoredNodes_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionSpecifier_fieldAccessorTable; } - // repeated .ServerName favored_node = 1; - public static final int FAVORED_NODE_FIELD_NUMBER = 1; - private java.util.List favoredNode_; - public java.util.List getFavoredNodeList() { - return favoredNode_; + public enum RegionSpecifierType + implements com.google.protobuf.ProtocolMessageEnum { + REGION_NAME(0, 1), + ENCODED_REGION_NAME(1, 2), + ; + + public static final int REGION_NAME_VALUE = 1; + public static final int ENCODED_REGION_NAME_VALUE = 2; + + + public final int getNumber() { return value; } + + public static RegionSpecifierType valueOf(int value) { + switch (value) { + case 1: return REGION_NAME; + case 2: return ENCODED_REGION_NAME; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public RegionSpecifierType findValueByNumber(int number) { + return RegionSpecifierType.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDescriptor().getEnumTypes().get(0); + } + + private static final RegionSpecifierType[] VALUES = { + REGION_NAME, ENCODED_REGION_NAME, + }; + + public static RegionSpecifierType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private RegionSpecifierType(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:RegionSpecifier.RegionSpecifierType) } - public java.util.List - getFavoredNodeOrBuilderList() { - return favoredNode_; + + private int bitField0_; + // required .RegionSpecifier.RegionSpecifierType type = 1; + public static final int TYPE_FIELD_NUMBER = 1; + private 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType type_; + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); } - public int getFavoredNodeCount() { - return favoredNode_.size(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType getType() { + return type_; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index) { - return favoredNode_.get(index); + + // required bytes value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString value_; + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder( - int index) { - return favoredNode_.get(index); + public com.google.protobuf.ByteString getValue() { + return value_; } private void initFields() { - favoredNode_ = java.util.Collections.emptyList(); + type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; + value_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - for (int i = 0; i < getFavoredNodeCount(); i++) { - if (!getFavoredNode(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; } memoizedIsInitialized = 1; return true; @@ -3112,8 +4420,11 @@ public final class HBaseProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - for (int i = 0; i < favoredNode_.size(); i++) { - output.writeMessage(1, favoredNode_.get(i)); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, type_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, value_); } getUnknownFields().writeTo(output); } @@ -3124,9 +4435,13 @@ public final class HBaseProtos { if (size != -1) return size; size = 0; - for (int i = 0; i < favoredNode_.size(); i++) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, favoredNode_.get(i)); + .computeEnumSize(1, type_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, value_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -3145,14 +4460,22 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier) obj; boolean result = true; - result = result && getFavoredNodeList() - .equals(other.getFavoredNodeList()); + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = 
result && + (getType() == other.getType()); + } + result = result && (hasValue() == other.hasValue()); + if (hasValue()) { + result = result && getValue() + .equals(other.getValue()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -3162,49 +4485,53 @@ public final class HBaseProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getFavoredNodeCount() > 0) { - hash = (37 * hash) + FAVORED_NODE_FIELD_NUMBER; - hash = (53 * hash) + getFavoredNodeList().hashCode(); + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); + } + if (hasValue()) { + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + getValue().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -3213,7 +4540,7 @@ public final class HBaseProtos { return null; } } - public static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -3224,12 +4551,12 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -3239,7 +4566,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -3252,18 +4579,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodesOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_FavoredNodes_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionSpecifier_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_FavoredNodes_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionSpecifier_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -3274,7 +4601,6 @@ public final class HBaseProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getFavoredNodeFieldBuilder(); } } private static Builder create() { @@ -3283,12 +4609,10 @@ public final class HBaseProtos { public Builder clear() { super.clear(); - if (favoredNodeBuilder_ == null) { - favoredNode_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - favoredNodeBuilder_.clear(); - } + type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; + bitField0_ = (bitField0_ & ~0x00000001); + value_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); return this; } 
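
RegionSpecifier names a region in one of two ways: a required RegionSpecifierType (REGION_NAME or ENCODED_REGION_NAME) paired with the required bytes value. A hedged sketch of the two forms (not part of this patch; the helper names are illustrative only):

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;

    public class RegionSpecifierProtoSketch {
      // Address a region by its full region name.
      public static RegionSpecifier byRegionName(byte[] regionName) {
        return RegionSpecifier.newBuilder()
            .setType(RegionSpecifierType.REGION_NAME)   // required enum, field 1
            .setValue(ByteString.copyFrom(regionName))  // required bytes, field 2
            .build();
      }

      // Address a region by its encoded name only.
      public static RegionSpecifier byEncodedRegionName(String encodedName) {
        return RegionSpecifier.newBuilder()
            .setType(RegionSpecifierType.ENCODED_REGION_NAME)
            .setValue(ByteString.copyFromUtf8(encodedName))
            .build();
      }
    }

When parsing, an unrecognized type number is preserved in unknownFields via mergeVarintField rather than silently dropped, per the readEnum case later in this hunk.
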
@@ -3298,24 +4622,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -3323,69 +4647,52 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier(this); int from_bitField0_ = bitField0_; - if (favoredNodeBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001)) { - favoredNode_ = java.util.Collections.unmodifiableList(favoredNode_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.favoredNode_ = favoredNode_; - } else { - result.favoredNode_ = favoredNodeBuilder_.build(); + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; } + result.value_ = value_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier)other); } else { super.mergeFrom(other); return this; } } - public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.getDefaultInstance()) return this; - if (favoredNodeBuilder_ == null) { - if (!other.favoredNode_.isEmpty()) { - if (favoredNode_.isEmpty()) { - favoredNode_ = other.favoredNode_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureFavoredNodeIsMutable(); - favoredNode_.addAll(other.favoredNode_); - } - onChanged(); - } - } else { - if (!other.favoredNode_.isEmpty()) { - if (favoredNodeBuilder_.isEmpty()) { - favoredNodeBuilder_.dispose(); - favoredNodeBuilder_ = null; - favoredNode_ = other.favoredNode_; - bitField0_ = (bitField0_ & ~0x00000001); - favoredNodeBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getFavoredNodeFieldBuilder() : null; - } else { - favoredNodeBuilder_.addAllMessages(other.favoredNode_); - } - } + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) return this; + if (other.hasType()) { + setType(other.getType()); + } + if (other.hasValue()) { + setValue(other.getValue()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - for (int i = 0; i < getFavoredNodeCount(); i++) { - if (!getFavoredNode(i).isInitialized()) { - - return false; - } + if (!hasType()) { + + return false; + } + if (!hasValue()) { + + return false; } return true; } @@ -3413,10 +4720,20 @@ public final class HBaseProtos { } break; } - case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(); - input.readMessage(subBuilder, extensionRegistry); - addFavoredNode(subBuilder.buildPartial()); + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + type_ = value; + } + break; + } + case 18: { + bitField0_ |= 0x00000002; + value_ = input.readBytes(); break; } } @@ -3425,349 +4742,134 @@ public final class HBaseProtos { private int bitField0_; - // repeated .ServerName favored_node = 1; - private java.util.List favoredNode_ = - java.util.Collections.emptyList(); - private void ensureFavoredNodeIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - favoredNode_ = new java.util.ArrayList(favoredNode_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> favoredNodeBuilder_; - - public java.util.List getFavoredNodeList() { - if (favoredNodeBuilder_ == null) { - return java.util.Collections.unmodifiableList(favoredNode_); - } else { - return favoredNodeBuilder_.getMessageList(); - } - } - public int getFavoredNodeCount() { - if (favoredNodeBuilder_ == null) { - return favoredNode_.size(); - } else { - return favoredNodeBuilder_.getCount(); - } - } - public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getFavoredNode(int index) { - if (favoredNodeBuilder_ == null) { - return favoredNode_.get(index); - } else { - return favoredNodeBuilder_.getMessage(index); - } - } - public Builder setFavoredNode( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (favoredNodeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFavoredNodeIsMutable(); - favoredNode_.set(index, value); - onChanged(); - } else { - favoredNodeBuilder_.setMessage(index, value); - } - return this; - } - public Builder setFavoredNode( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (favoredNodeBuilder_ == null) { - ensureFavoredNodeIsMutable(); - favoredNode_.set(index, builderForValue.build()); - onChanged(); - } else { - favoredNodeBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - public Builder addFavoredNode(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (favoredNodeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFavoredNodeIsMutable(); - favoredNode_.add(value); - onChanged(); - } else { - favoredNodeBuilder_.addMessage(value); - } - return this; - } - public Builder addFavoredNode( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { - if (favoredNodeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFavoredNodeIsMutable(); - favoredNode_.add(index, value); - onChanged(); - } else { - favoredNodeBuilder_.addMessage(index, value); - } - return this; - } - public Builder addFavoredNode( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (favoredNodeBuilder_ == null) { - ensureFavoredNodeIsMutable(); - favoredNode_.add(builderForValue.build()); - onChanged(); - } else { - favoredNodeBuilder_.addMessage(builderForValue.build()); - } - return this; - } - public Builder addFavoredNode( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { - if (favoredNodeBuilder_ == null) { - ensureFavoredNodeIsMutable(); - favoredNode_.add(index, builderForValue.build()); - onChanged(); - } else { - favoredNodeBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - public Builder addAllFavoredNode( - java.lang.Iterable values) { - if (favoredNodeBuilder_ == null) { - ensureFavoredNodeIsMutable(); - super.addAll(values, favoredNode_); - onChanged(); - } else { - favoredNodeBuilder_.addAllMessages(values); - } - return this; - } - public Builder clearFavoredNode() { - if (favoredNodeBuilder_ == null) { - favoredNode_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - favoredNodeBuilder_.clear(); - } - return this; - } - public Builder removeFavoredNode(int index) { - if (favoredNodeBuilder_ == null) { - ensureFavoredNodeIsMutable(); - favoredNode_.remove(index); - onChanged(); - } else { - favoredNodeBuilder_.remove(index); - } - return this; - } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getFavoredNodeBuilder( - int index) { - return getFavoredNodeFieldBuilder().getBuilder(index); + // required .RegionSpecifier.RegionSpecifierType type = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType type_ 
= org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getFavoredNodeOrBuilder( - int index) { - if (favoredNodeBuilder_ == null) { - return favoredNode_.get(index); } else { - return favoredNodeBuilder_.getMessageOrBuilder(index); - } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType getType() { + return type_; } - public java.util.List - getFavoredNodeOrBuilderList() { - if (favoredNodeBuilder_ != null) { - return favoredNodeBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(favoredNode_); + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType value) { + if (value == null) { + throw new NullPointerException(); } + bitField0_ |= 0x00000001; + type_ = value; + onChanged(); + return this; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addFavoredNodeBuilder() { - return getFavoredNodeFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000001); + type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; + onChanged(); + return this; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addFavoredNodeBuilder( - int index) { - return getFavoredNodeFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + + // required bytes value = 2; + private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); } - public java.util.List - getFavoredNodeBuilderList() { - return getFavoredNodeFieldBuilder().getBuilderList(); + public com.google.protobuf.ByteString getValue() { + return value_; } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> - getFavoredNodeFieldBuilder() { - if (favoredNodeBuilder_ == null) { - favoredNodeBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( - favoredNode_, - ((bitField0_ & 0x00000001) == 0x00000001), - getParentForChildren(), - isClean()); - favoredNode_ = null; - } - return favoredNodeBuilder_; + public Builder setValue(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000002); + value_ = getDefaultInstance().getValue(); + onChanged(); + return this; } - // @@protoc_insertion_point(builder_scope:FavoredNodes) + // @@protoc_insertion_point(builder_scope:RegionSpecifier) } static { - defaultInstance = new FavoredNodes(true); + defaultInstance = new 
RegionSpecifier(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:FavoredNodes) + // @@protoc_insertion_point(class_scope:RegionSpecifier) } - public interface RegionSpecifierOrBuilder + public interface TimeRangeOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .RegionSpecifier.RegionSpecifierType type = 1; - boolean hasType(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType getType(); + // optional uint64 from = 1; + boolean hasFrom(); + long getFrom(); - // required bytes value = 2; - boolean hasValue(); - com.google.protobuf.ByteString getValue(); + // optional uint64 to = 2; + boolean hasTo(); + long getTo(); } - public static final class RegionSpecifier extends + public static final class TimeRange extends com.google.protobuf.GeneratedMessage - implements RegionSpecifierOrBuilder { - // Use RegionSpecifier.newBuilder() to construct. - private RegionSpecifier(Builder builder) { + implements TimeRangeOrBuilder { + // Use TimeRange.newBuilder() to construct. + private TimeRange(Builder builder) { super(builder); } - private RegionSpecifier(boolean noInit) {} + private TimeRange(boolean noInit) {} - private static final RegionSpecifier defaultInstance; - public static RegionSpecifier getDefaultInstance() { + private static final TimeRange defaultInstance; + public static TimeRange getDefaultInstance() { return defaultInstance; } - public RegionSpecifier getDefaultInstanceForType() { + public TimeRange getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionSpecifier_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TimeRange_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionSpecifier_fieldAccessorTable; - } - - public enum RegionSpecifierType - implements com.google.protobuf.ProtocolMessageEnum { - REGION_NAME(0, 1), - ENCODED_REGION_NAME(1, 2), - ; - - public static final int REGION_NAME_VALUE = 1; - public static final int ENCODED_REGION_NAME_VALUE = 2; - - - public final int getNumber() { return value; } - - public static RegionSpecifierType valueOf(int value) { - switch (value) { - case 1: return REGION_NAME; - case 2: return ENCODED_REGION_NAME; - default: return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public RegionSpecifierType findValueByNumber(int number) { - return RegionSpecifierType.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDescriptor().getEnumTypes().get(0); - } - - private static final RegionSpecifierType[] VALUES = { - REGION_NAME, ENCODED_REGION_NAME, - }; - - public static RegionSpecifierType 
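[Editor's note] The hunks above replace the FavoredNodes builder with the new RegionSpecifier message, which carries a required RegionSpecifierType enum (REGION_NAME = 1, ENCODED_REGION_NAME = 2) and a required bytes value. A minimal sketch of the generated builder API, assuming the caller already holds a region name as a byte array; the helper name and parameter are illustrative, not part of this patch:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;

    // Illustrative only: address a region by its full region name.
    // Both fields are required, so isInitialized() fails if either is unset.
    static RegionSpecifier toRegionSpecifier(byte[] regionNameBytes) {
      return RegionSpecifier.newBuilder()
          .setType(RegionSpecifierType.REGION_NAME)
          .setValue(ByteString.copyFrom(regionNameBytes))
          .build();
    }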
valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); - } - return VALUES[desc.getIndex()]; - } - - private final int index; - private final int value; - - private RegionSpecifierType(int index, int value) { - this.index = index; - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:RegionSpecifier.RegionSpecifierType) + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TimeRange_fieldAccessorTable; } private int bitField0_; - // required .RegionSpecifier.RegionSpecifierType type = 1; - public static final int TYPE_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType type_; - public boolean hasType() { + // optional uint64 from = 1; + public static final int FROM_FIELD_NUMBER = 1; + private long from_; + public boolean hasFrom() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType getType() { - return type_; + public long getFrom() { + return from_; } - // required bytes value = 2; - public static final int VALUE_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString value_; - public boolean hasValue() { + // optional uint64 to = 2; + public static final int TO_FIELD_NUMBER = 2; + private long to_; + public boolean hasTo() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public com.google.protobuf.ByteString getValue() { - return value_; + public long getTo() { + return to_; } private void initFields() { - type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; - value_ = com.google.protobuf.ByteString.EMPTY; + from_ = 0L; + to_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasType()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasValue()) { - memoizedIsInitialized = 0; - return false; - } memoizedIsInitialized = 1; return true; } @@ -3776,10 +4878,10 @@ public final class HBaseProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, type_.getNumber()); + output.writeUInt64(1, from_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, value_); + output.writeUInt64(2, to_); } getUnknownFields().writeTo(output); } @@ -3792,11 +4894,11 @@ public final class HBaseProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, type_.getNumber()); + .computeUInt64Size(1, from_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, value_); + .computeUInt64Size(2, to_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -3815,21 +4917,21 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier other = 
(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange) obj; boolean result = true; - result = result && (hasType() == other.hasType()); - if (hasType()) { - result = result && - (getType() == other.getType()); + result = result && (hasFrom() == other.hasFrom()); + if (hasFrom()) { + result = result && (getFrom() + == other.getFrom()); } - result = result && (hasValue() == other.hasValue()); - if (hasValue()) { - result = result && getValue() - .equals(other.getValue()); + result = result && (hasTo() == other.hasTo()); + if (hasTo()) { + result = result && (getTo() + == other.getTo()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -3840,53 +4942,53 @@ public final class HBaseProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasType()) { - hash = (37 * hash) + TYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getType()); + if (hasFrom()) { + hash = (37 * hash) + FROM_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getFrom()); } - if (hasValue()) { - hash = (37 * hash) + VALUE_FIELD_NUMBER; - hash = (53 * hash) + getValue().hashCode(); + if (hasTo()) { + hash = (37 * hash) + TO_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTo()); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -3895,7 +4997,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -3906,12 +5008,12 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -3921,7 +5023,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -3934,18 +5036,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionSpecifier_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TimeRange_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_RegionSpecifier_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TimeRange_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -3964,9 +5066,9 @@ public final class HBaseProtos { public Builder clear() { super.clear(); - type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; + from_ = 0L; bitField0_ = 
(bitField0_ & ~0x00000001); - value_ = com.google.protobuf.ByteString.EMPTY; + to_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -3977,24 +5079,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -4002,53 +5104,45 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.type_ = type_; + result.from_ = from_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.value_ = value_; + result.to_ = to_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier other) { - if (other == 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) return this; - if (other.hasType()) { - setType(other.getType()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance()) return this; + if (other.hasFrom()) { + setFrom(other.getFrom()); } - if (other.hasValue()) { - setValue(other.getValue()); + if (other.hasTo()) { + setTo(other.getTo()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasType()) { - - return false; - } - if (!hasValue()) { - - return false; - } return true; } @@ -4076,19 +5170,13 @@ public final class HBaseProtos { break; } case 8: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(1, rawValue); - } else { - bitField0_ |= 0x00000001; - type_ = value; - } + bitField0_ |= 0x00000001; + from_ = input.readUInt64(); break; } - case 18: { + case 16: { bitField0_ |= 0x00000002; - value_ = input.readBytes(); + to_ = input.readUInt64(); break; } } @@ -4097,134 +5185,169 @@ public final class HBaseProtos { private int bitField0_; - // required .RegionSpecifier.RegionSpecifierType type = 1; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; - public boolean hasType() { + // optional uint64 from = 1; + private long from_ ; + public boolean hasFrom() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType getType() { - return type_; + public long getFrom() { + return from_; } - public Builder setType(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType value) { - if (value == null) { - throw new NullPointerException(); - } + public Builder setFrom(long value) { bitField0_ |= 0x00000001; - type_ = value; + from_ = value; onChanged(); return this; } - public Builder clearType() { + public Builder clearFrom() { bitField0_ = (bitField0_ & ~0x00000001); - type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME; + from_ = 0L; onChanged(); return this; } - // required bytes value = 2; - private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasValue() { + // optional uint64 to = 2; + private long to_ ; + public boolean hasTo() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public com.google.protobuf.ByteString getValue() { - return value_; + public long getTo() { + return to_; } - public Builder setValue(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - value_ = value; + public Builder setTo(long value) { + bitField0_ |= 0x00000002; + to_ = value; onChanged(); return this; } - public Builder clearValue() { + public Builder clearTo() { bitField0_ = (bitField0_ & ~0x00000002); - value_ = getDefaultInstance().getValue(); + to_ = 0L; onChanged(); return this; } - // 
@@protoc_insertion_point(builder_scope:RegionSpecifier) + // @@protoc_insertion_point(builder_scope:TimeRange) } static { - defaultInstance = new RegionSpecifier(true); + defaultInstance = new TimeRange(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:RegionSpecifier) + // @@protoc_insertion_point(class_scope:TimeRange) } - public interface TimeRangeOrBuilder + public interface ServerNameOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional uint64 from = 1; - boolean hasFrom(); - long getFrom(); + // required string host_name = 1; + boolean hasHostName(); + String getHostName(); - // optional uint64 to = 2; - boolean hasTo(); - long getTo(); + // optional uint32 port = 2; + boolean hasPort(); + int getPort(); + + // optional uint64 start_code = 3; + boolean hasStartCode(); + long getStartCode(); } - public static final class TimeRange extends + public static final class ServerName extends com.google.protobuf.GeneratedMessage - implements TimeRangeOrBuilder { - // Use TimeRange.newBuilder() to construct. - private TimeRange(Builder builder) { + implements ServerNameOrBuilder { + // Use ServerName.newBuilder() to construct. + private ServerName(Builder builder) { super(builder); } - private TimeRange(boolean noInit) {} + private ServerName(boolean noInit) {} - private static final TimeRange defaultInstance; - public static TimeRange getDefaultInstance() { + private static final ServerName defaultInstance; + public static ServerName getDefaultInstance() { return defaultInstance; } - public TimeRange getDefaultInstanceForType() { + public ServerName getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TimeRange_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ServerName_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TimeRange_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ServerName_fieldAccessorTable; } private int bitField0_; - // optional uint64 from = 1; - public static final int FROM_FIELD_NUMBER = 1; - private long from_; - public boolean hasFrom() { + // required string host_name = 1; + public static final int HOST_NAME_FIELD_NUMBER = 1; + private java.lang.Object hostName_; + public boolean hasHostName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public long getFrom() { - return from_; + public String getHostName() { + java.lang.Object ref = hostName_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + hostName_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getHostNameBytes() { + java.lang.Object ref = hostName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + hostName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } - // optional uint64 to = 2; - public static final int TO_FIELD_NUMBER = 2; - private long to_; - public boolean hasTo() { + // optional uint32 port = 2; + public static final int PORT_FIELD_NUMBER 
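[Editor's note] The TimeRange message completed above has two optional uint64 fields, from and to; neither is required, so an empty TimeRange is valid and both fields default to 0. A hedged sketch of building one with the generated setters; the helper name is illustrative:

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange;

    // Illustrative only: build a protobuf TimeRange with both bounds set.
    static TimeRange toTimeRange(long from, long to) {
      return TimeRange.newBuilder()
          .setFrom(from)   // optional; defaults to 0 when unset
          .setTo(to)       // optional; defaults to 0 when unset
          .build();
    }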
= 2; + private int port_; + public boolean hasPort() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public long getTo() { - return to_; + public int getPort() { + return port_; + } + + // optional uint64 start_code = 3; + public static final int START_CODE_FIELD_NUMBER = 3; + private long startCode_; + public boolean hasStartCode() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getStartCode() { + return startCode_; } private void initFields() { - from_ = 0L; - to_ = 0L; + hostName_ = ""; + port_ = 0; + startCode_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (!hasHostName()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -4233,10 +5356,13 @@ public final class HBaseProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, from_); + output.writeBytes(1, getHostNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, to_); + output.writeUInt32(2, port_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, startCode_); } getUnknownFields().writeTo(output); } @@ -4249,11 +5375,15 @@ public final class HBaseProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, from_); + .computeBytesSize(1, getHostNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, to_); + .computeUInt32Size(2, port_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, startCode_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -4272,21 +5402,26 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName) obj; boolean result = true; - result = result && (hasFrom() == other.hasFrom()); - if (hasFrom()) { - result = result && (getFrom() - == other.getFrom()); + result = result && (hasHostName() == other.hasHostName()); + if (hasHostName()) { + result = result && getHostName() + .equals(other.getHostName()); } - result = result && (hasTo() == other.hasTo()); - if (hasTo()) { - result = result && (getTo() - == other.getTo()); + result = result && (hasPort() == other.hasPort()); + if (hasPort()) { + result = result && (getPort() + == other.getPort()); + } + result = result && (hasStartCode() == other.hasStartCode()); + if (hasStartCode()) { + result = result && (getStartCode() + == other.getStartCode()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -4297,53 +5432,57 @@ public final class HBaseProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasFrom()) { - hash = (37 * hash) + FROM_FIELD_NUMBER; - hash = (53 * 
hash) + hashLong(getFrom()); + if (hasHostName()) { + hash = (37 * hash) + HOST_NAME_FIELD_NUMBER; + hash = (53 * hash) + getHostName().hashCode(); } - if (hasTo()) { - hash = (37 * hash) + TO_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getTo()); + if (hasPort()) { + hash = (37 * hash) + PORT_FIELD_NUMBER; + hash = (53 * hash) + getPort(); + } + if (hasStartCode()) { + hash = (37 * hash) + START_CODE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartCode()); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -4352,7 +5491,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -4363,12 +5502,12 @@ public final class HBaseProtos { return null; } } - public static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -4378,7 +5517,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -4391,18 +5530,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRangeOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TimeRange_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ServerName_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TimeRange_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ServerName_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -4421,10 +5560,12 @@ public final class HBaseProtos { public Builder clear() { super.clear(); - from_ = 0L; + hostName_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - to_ = 0L; + port_ = 0; bitField0_ = (bitField0_ & ~0x00000002); + startCode_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -4434,24 +5575,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange result = buildPartial(); + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -4459,45 +5600,56 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.from_ = from_; + result.hostName_ = hostName_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.to_ = to_; + result.port_ = port_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.startCode_ = startCode_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.getDefaultInstance()) return this; - if (other.hasFrom()) { - setFrom(other.getFrom()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) return this; + if (other.hasHostName()) { + setHostName(other.getHostName()); } - if (other.hasTo()) { - setTo(other.getTo()); + if (other.hasPort()) { + setPort(other.getPort()); + } + if (other.hasStartCode()) { + setStartCode(other.getStartCode()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + if (!hasHostName()) { + + return false; + } return true; } @@ -4524,14 +5676,19 @@ public final class HBaseProtos { } break; } - case 8: { + case 10: { bitField0_ |= 0x00000001; - from_ = input.readUInt64(); + hostName_ = input.readBytes(); break; } case 16: { bitField0_ |= 0x00000002; - to_ = input.readUInt64(); + port_ = input.readUInt32(); + break; 
+ } + case 24: { + bitField0_ |= 0x00000004; + startCode_ = input.readUInt64(); break; } } @@ -4540,111 +5697,139 @@ public final class HBaseProtos { private int bitField0_; - // optional uint64 from = 1; - private long from_ ; - public boolean hasFrom() { + // required string host_name = 1; + private java.lang.Object hostName_ = ""; + public boolean hasHostName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public long getFrom() { - return from_; + public String getHostName() { + java.lang.Object ref = hostName_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + hostName_ = s; + return s; + } else { + return (String) ref; + } } - public Builder setFrom(long value) { - bitField0_ |= 0x00000001; - from_ = value; + public Builder setHostName(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + hostName_ = value; onChanged(); return this; } - public Builder clearFrom() { + public Builder clearHostName() { bitField0_ = (bitField0_ & ~0x00000001); - from_ = 0L; + hostName_ = getDefaultInstance().getHostName(); onChanged(); return this; } + void setHostName(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + hostName_ = value; + onChanged(); + } - // optional uint64 to = 2; - private long to_ ; - public boolean hasTo() { + // optional uint32 port = 2; + private int port_ ; + public boolean hasPort() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public long getTo() { - return to_; + public int getPort() { + return port_; } - public Builder setTo(long value) { + public Builder setPort(int value) { bitField0_ |= 0x00000002; - to_ = value; + port_ = value; onChanged(); return this; } - public Builder clearTo() { + public Builder clearPort() { bitField0_ = (bitField0_ & ~0x00000002); - to_ = 0L; + port_ = 0; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:TimeRange) + // optional uint64 start_code = 3; + private long startCode_ ; + public boolean hasStartCode() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getStartCode() { + return startCode_; + } + public Builder setStartCode(long value) { + bitField0_ |= 0x00000004; + startCode_ = value; + onChanged(); + return this; + } + public Builder clearStartCode() { + bitField0_ = (bitField0_ & ~0x00000004); + startCode_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ServerName) } static { - defaultInstance = new TimeRange(true); + defaultInstance = new ServerName(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:TimeRange) + // @@protoc_insertion_point(class_scope:ServerName) } - public interface ServerNameOrBuilder + public interface CoprocessorOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required string host_name = 1; - boolean hasHostName(); - String getHostName(); - - // optional uint32 port = 2; - boolean hasPort(); - int getPort(); - - // optional uint64 start_code = 3; - boolean hasStartCode(); - long getStartCode(); + // required string name = 1; + boolean hasName(); + String getName(); } - public static final class ServerName extends + public static final class Coprocessor extends com.google.protobuf.GeneratedMessage - implements ServerNameOrBuilder { - // Use ServerName.newBuilder() to construct. - private ServerName(Builder builder) { + implements CoprocessorOrBuilder { + // Use Coprocessor.newBuilder() to construct. 
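[Editor's note] The ServerName message completed above carries a required host_name string plus optional port and start_code, i.e. the host/port/startcode triple HBase uses to identify a server instance. A minimal builder sketch, assuming the caller already has those three values; the helper and parameter names are illustrative, not from this patch:

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName;

    // Illustrative only: host_name is required, port and start_code are optional.
    static ServerName toServerNameProto(String hostName, int port, long startCode) {
      return ServerName.newBuilder()
          .setHostName(hostName)
          .setPort(port)
          .setStartCode(startCode)
          .build();
    }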
+ private Coprocessor(Builder builder) { super(builder); } - private ServerName(boolean noInit) {} + private Coprocessor(boolean noInit) {} - private static final ServerName defaultInstance; - public static ServerName getDefaultInstance() { + private static final Coprocessor defaultInstance; + public static Coprocessor getDefaultInstance() { return defaultInstance; } - public ServerName getDefaultInstanceForType() { + public Coprocessor getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ServerName_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_Coprocessor_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ServerName_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_Coprocessor_fieldAccessorTable; } private int bitField0_; - // required string host_name = 1; - public static final int HOST_NAME_FIELD_NUMBER = 1; - private java.lang.Object hostName_; - public boolean hasHostName() { + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public String getHostName() { - java.lang.Object ref = hostName_; + public String getName() { + java.lang.Object ref = name_; if (ref instanceof String) { return (String) ref; } else { @@ -4652,54 +5837,32 @@ public final class HBaseProtos { (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { - hostName_ = s; + name_ = s; } return s; } } - private com.google.protobuf.ByteString getHostNameBytes() { - java.lang.Object ref = hostName_; + private com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); - hostName_ = b; + name_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - // optional uint32 port = 2; - public static final int PORT_FIELD_NUMBER = 2; - private int port_; - public boolean hasPort() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getPort() { - return port_; - } - - // optional uint64 start_code = 3; - public static final int START_CODE_FIELD_NUMBER = 3; - private long startCode_; - public boolean hasStartCode() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getStartCode() { - return startCode_; - } - private void initFields() { - hostName_ = ""; - port_ = 0; - startCode_ = 0L; + name_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasHostName()) { + if (!hasName()) { memoizedIsInitialized = 0; return false; } @@ -4711,13 +5874,7 @@ public final class HBaseProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getHostNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt32(2, port_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeUInt64(3, startCode_); + 
output.writeBytes(1, getNameBytes()); } getUnknownFields().writeTo(output); } @@ -4730,15 +5887,7 @@ public final class HBaseProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getHostNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(2, port_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(3, startCode_); + .computeBytesSize(1, getNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -4757,26 +5906,16 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor) obj; boolean result = true; - result = result && (hasHostName() == other.hasHostName()); - if (hasHostName()) { - result = result && getHostName() - .equals(other.getHostName()); - } - result = result && (hasPort() == other.hasPort()); - if (hasPort()) { - result = result && (getPort() - == other.getPort()); - } - result = result && (hasStartCode() == other.hasStartCode()); - if (hasStartCode()) { - result = result && (getStartCode() - == other.getStartCode()); + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -4787,57 +5926,49 @@ public final class HBaseProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasHostName()) { - hash = (37 * hash) + HOST_NAME_FIELD_NUMBER; - hash = (53 * hash) + getHostName().hashCode(); - } - if (hasPort()) { - hash = (37 * hash) + PORT_FIELD_NUMBER; - hash = (53 * hash) + getPort(); - } - if (hasStartCode()) { - hash = (37 * hash) + START_CODE_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getStartCode()); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom(byte[] data) + public static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -4846,7 +5977,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -4857,12 +5988,12 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -4872,7 +6003,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -4885,18 +6016,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder { + implements 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ServerName_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_Coprocessor_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_ServerName_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_Coprocessor_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -4915,12 +6046,8 @@ public final class HBaseProtos { public Builder clear() { super.clear(); - hostName_ = ""; + name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - port_ = 0; - bitField0_ = (bitField0_ & ~0x00000002); - startCode_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -4930,24 +6057,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -4955,53 +6082,39 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor(this); int 
from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.hostName_ = hostName_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.port_ = port_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.startCode_ = startCode_; + result.name_ = name_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) return this; - if (other.hasHostName()) { - setHostName(other.getHostName()); - } - if (other.hasPort()) { - setPort(other.getPort()); - } - if (other.hasStartCode()) { - setStartCode(other.getStartCode()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasHostName()) { + if (!hasName()) { return false; } @@ -5033,17 +6146,7 @@ public final class HBaseProtos { } case 10: { bitField0_ |= 0x00000001; - hostName_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - port_ = input.readUInt32(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - startCode_ = input.readUInt64(); + name_ = input.readBytes(); break; } } @@ -5052,128 +6155,90 @@ public final class HBaseProtos { private int bitField0_; - // required string host_name = 1; - private java.lang.Object hostName_ = ""; - public boolean hasHostName() { + // required string name = 1; + private java.lang.Object name_ = ""; + public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public String getHostName() { - java.lang.Object ref = hostName_; + public String getName() { + java.lang.Object ref = name_; if (!(ref instanceof String)) { String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - hostName_ = s; + name_ = s; return s; } else { return (String) ref; } } - public Builder setHostName(String value) { + public Builder setName(String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - hostName_ = value; + name_ = value; onChanged(); return this; } - public Builder clearHostName() { + public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000001); - hostName_ = getDefaultInstance().getHostName(); + name_ = getDefaultInstance().getName(); onChanged(); return this; } - void setHostName(com.google.protobuf.ByteString value) { + void setName(com.google.protobuf.ByteString value) { bitField0_ |= 0x00000001; - hostName_ = value; - onChanged(); - } - - // optional uint32 port = 2; - private int port_ ; - public boolean hasPort() { - 
return ((bitField0_ & 0x00000002) == 0x00000002); - } - public int getPort() { - return port_; - } - public Builder setPort(int value) { - bitField0_ |= 0x00000002; - port_ = value; - onChanged(); - return this; - } - public Builder clearPort() { - bitField0_ = (bitField0_ & ~0x00000002); - port_ = 0; - onChanged(); - return this; - } - - // optional uint64 start_code = 3; - private long startCode_ ; - public boolean hasStartCode() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getStartCode() { - return startCode_; - } - public Builder setStartCode(long value) { - bitField0_ |= 0x00000004; - startCode_ = value; - onChanged(); - return this; - } - public Builder clearStartCode() { - bitField0_ = (bitField0_ & ~0x00000004); - startCode_ = 0L; + name_ = value; onChanged(); - return this; } - // @@protoc_insertion_point(builder_scope:ServerName) + // @@protoc_insertion_point(builder_scope:Coprocessor) } static { - defaultInstance = new ServerName(true); + defaultInstance = new Coprocessor(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:ServerName) + // @@protoc_insertion_point(class_scope:Coprocessor) } - public interface CoprocessorOrBuilder + public interface NameStringPairOrBuilder extends com.google.protobuf.MessageOrBuilder { // required string name = 1; boolean hasName(); String getName(); + + // required string value = 2; + boolean hasValue(); + String getValue(); } - public static final class Coprocessor extends + public static final class NameStringPair extends com.google.protobuf.GeneratedMessage - implements CoprocessorOrBuilder { - // Use Coprocessor.newBuilder() to construct. - private Coprocessor(Builder builder) { + implements NameStringPairOrBuilder { + // Use NameStringPair.newBuilder() to construct. 
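(Illustrative note, not part of the patch.) The hunk above produces the generated code for the Coprocessor message, which carries a single required string field, name = 1. A minimal Java sketch of that generated builder API, assuming an enclosing method and using a placeholder coprocessor class name:

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    // build() throws if the required "name" field is never set,
    // matching the isInitialized() check generated in the hunk above.
    HBaseProtos.Coprocessor cp = HBaseProtos.Coprocessor.newBuilder()
        .setName("org.example.MyRegionObserver")   // placeholder class name
        .build();
    String loadedClass = cp.getName();

The diff continues below with the NameStringPair message.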
+ private NameStringPair(Builder builder) { super(builder); } - private Coprocessor(boolean noInit) {} + private NameStringPair(boolean noInit) {} - private static final Coprocessor defaultInstance; - public static Coprocessor getDefaultInstance() { + private static final NameStringPair defaultInstance; + public static NameStringPair getDefaultInstance() { return defaultInstance; } - public Coprocessor getDefaultInstanceForType() { + public NameStringPair getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_Coprocessor_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameStringPair_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_Coprocessor_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameStringPair_fieldAccessorTable; } private int bitField0_; @@ -5209,8 +6274,41 @@ public final class HBaseProtos { } } + // required string value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private java.lang.Object value_; + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getValue() { + java.lang.Object ref = value_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + value_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + private void initFields() { name_ = ""; + value_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -5221,6 +6319,10 @@ public final class HBaseProtos { memoizedIsInitialized = 0; return false; } + if (!hasValue()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -5231,6 +6333,9 @@ public final class HBaseProtos { if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getNameBytes()); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getValueBytes()); + } getUnknownFields().writeTo(output); } @@ -5244,6 +6349,10 @@ public final class HBaseProtos { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, getNameBytes()); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getValueBytes()); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -5261,10 +6370,10 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor) obj; + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair) obj; boolean result = true; result = result && (hasName() == other.hasName()); @@ -5272,6 +6381,11 @@ public final class HBaseProtos { result = result && getName() .equals(other.getName()); } + result = result && (hasValue() == other.hasValue()); + if (hasValue()) { + result = result && getValue() + .equals(other.getValue()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -5285,45 +6399,49 @@ public final class HBaseProtos { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } + if (hasValue()) { + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + getValue().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -5332,7 +6450,7 @@ public final class HBaseProtos { return null; } } - public static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -5343,12 +6461,12 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -5358,7 +6476,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -5371,18 +6489,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_Coprocessor_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameStringPair_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_Coprocessor_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameStringPair_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -5403,6 +6521,8 @@ public final class HBaseProtos { super.clear(); name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); + value_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -5412,24 +6532,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance(); + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -5437,33 +6557,40 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.name_ = name_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.value_ = value_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()) return this; if (other.hasName()) { setName(other.getName()); } + if (other.hasValue()) { + setValue(other.getValue()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -5473,6 +6600,10 @@ public final class HBaseProtos { return false; } + if (!hasValue()) { + + return false; + } return true; } @@ -5504,6 +6635,11 @@ public final class HBaseProtos { name_ = input.readBytes(); break; } + case 18: { + bitField0_ |= 0x00000002; + value_ = input.readBytes(); + 
break; + } } } } @@ -5546,54 +6682,90 @@ public final class HBaseProtos { onChanged(); } - // @@protoc_insertion_point(builder_scope:Coprocessor) + // required string value = 2; + private java.lang.Object value_ = ""; + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public String getValue() { + java.lang.Object ref = value_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + value_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setValue(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000002); + value_ = getDefaultInstance().getValue(); + onChanged(); + return this; + } + void setValue(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + } + + // @@protoc_insertion_point(builder_scope:NameStringPair) } static { - defaultInstance = new Coprocessor(true); + defaultInstance = new NameStringPair(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:Coprocessor) + // @@protoc_insertion_point(class_scope:NameStringPair) } - public interface NameStringPairOrBuilder + public interface NameBytesPairOrBuilder extends com.google.protobuf.MessageOrBuilder { // required string name = 1; boolean hasName(); String getName(); - // required string value = 2; + // optional bytes value = 2; boolean hasValue(); - String getValue(); + com.google.protobuf.ByteString getValue(); } - public static final class NameStringPair extends + public static final class NameBytesPair extends com.google.protobuf.GeneratedMessage - implements NameStringPairOrBuilder { - // Use NameStringPair.newBuilder() to construct. - private NameStringPair(Builder builder) { + implements NameBytesPairOrBuilder { + // Use NameBytesPair.newBuilder() to construct. 
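(Illustrative note, not part of the patch.) The hunks above and below generate the two pair messages: NameStringPair (required string name = 1, required string value = 2) and NameBytesPair (required string name = 1, optional bytes value = 2). A hedged usage fragment of both builder APIs, assuming an enclosing method and placeholder key/value strings:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    // NameStringPair: name and value are both required strings.
    HBaseProtos.NameStringPair prop = HBaseProtos.NameStringPair.newBuilder()
        .setName("some.config.key")        // placeholder key
        .setValue("some-value")            // placeholder value
        .build();

    // NameBytesPair: value is an optional bytes field, so it may be left unset.
    HBaseProtos.NameBytesPair attr = HBaseProtos.NameBytesPair.newBuilder()
        .setName("attribute")              // placeholder name
        .setValue(ByteString.copyFromUtf8("payload"))
        .build();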
+ private NameBytesPair(Builder builder) { super(builder); } - private NameStringPair(boolean noInit) {} + private NameBytesPair(boolean noInit) {} - private static final NameStringPair defaultInstance; - public static NameStringPair getDefaultInstance() { + private static final NameBytesPair defaultInstance; + public static NameBytesPair getDefaultInstance() { return defaultInstance; } - public NameStringPair getDefaultInstanceForType() { + public NameBytesPair getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameStringPair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameBytesPair_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameStringPair_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameBytesPair_fieldAccessorTable; } private int bitField0_; @@ -5629,41 +6801,19 @@ public final class HBaseProtos { } } - // required string value = 2; - public static final int VALUE_FIELD_NUMBER = 2; - private java.lang.Object value_; - public boolean hasValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getValue() { - java.lang.Object ref = value_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - value_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getValueBytes() { - java.lang.Object ref = value_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - value_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - + // optional bytes value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString value_; + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public com.google.protobuf.ByteString getValue() { + return value_; + } + private void initFields() { name_ = ""; - value_ = ""; + value_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -5674,10 +6824,6 @@ public final class HBaseProtos { memoizedIsInitialized = 0; return false; } - if (!hasValue()) { - memoizedIsInitialized = 0; - return false; - } memoizedIsInitialized = 1; return true; } @@ -5689,7 +6835,7 @@ public final class HBaseProtos { output.writeBytes(1, getNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getValueBytes()); + output.writeBytes(2, value_); } getUnknownFields().writeTo(output); } @@ -5706,7 +6852,7 @@ public final class HBaseProtos { } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getValueBytes()); + .computeBytesSize(2, value_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -5725,10 +6871,10 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair)) { + if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair) obj; boolean result = true; result = result && (hasName() == other.hasName()); @@ -5762,41 +6908,41 @@ public final class HBaseProtos { return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -5805,7 +6951,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -5816,12 +6962,12 
@@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -5831,7 +6977,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -5844,18 +6990,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPairOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameStringPair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameBytesPair_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameStringPair_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameBytesPair_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -5876,7 +7022,7 @@ public final class HBaseProtos { super.clear(); name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - value_ = ""; + value_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -5887,24 +7033,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair build() { - 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -5912,8 +7058,8 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { @@ -5930,16 +7076,16 @@ public final class HBaseProtos { } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDefaultInstance()) return this; if (other.hasName()) { setName(other.getName()); } @@ -5955,10 +7101,6 @@ public final class HBaseProtos { return false; } - if (!hasValue()) { - - return false; - } return true; } @@ -6037,22 +7179,15 @@ public final class HBaseProtos { onChanged(); } - // required string value = 2; - private java.lang.Object value_ = ""; + // optional bytes value = 2; + private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY; public boolean hasValue() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public String getValue() { - java.lang.Object ref = value_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - value_ = s; - return s; - } else { - return (String) ref; - } + public com.google.protobuf.ByteString getValue() { + return value_; } - public Builder setValue(String value) { + public Builder 
setValue(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } @@ -6067,115 +7202,92 @@ public final class HBaseProtos { onChanged(); return this; } - void setValue(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - value_ = value; - onChanged(); - } - // @@protoc_insertion_point(builder_scope:NameStringPair) + // @@protoc_insertion_point(builder_scope:NameBytesPair) } static { - defaultInstance = new NameStringPair(true); + defaultInstance = new NameBytesPair(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:NameStringPair) + // @@protoc_insertion_point(class_scope:NameBytesPair) } - public interface NameBytesPairOrBuilder + public interface BytesBytesPairOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required string name = 1; - boolean hasName(); - String getName(); + // required bytes first = 1; + boolean hasFirst(); + com.google.protobuf.ByteString getFirst(); - // optional bytes value = 2; - boolean hasValue(); - com.google.protobuf.ByteString getValue(); + // required bytes second = 2; + boolean hasSecond(); + com.google.protobuf.ByteString getSecond(); } - public static final class NameBytesPair extends + public static final class BytesBytesPair extends com.google.protobuf.GeneratedMessage - implements NameBytesPairOrBuilder { - // Use NameBytesPair.newBuilder() to construct. - private NameBytesPair(Builder builder) { + implements BytesBytesPairOrBuilder { + // Use BytesBytesPair.newBuilder() to construct. + private BytesBytesPair(Builder builder) { super(builder); } - private NameBytesPair(boolean noInit) {} + private BytesBytesPair(boolean noInit) {} - private static final NameBytesPair defaultInstance; - public static NameBytesPair getDefaultInstance() { + private static final BytesBytesPair defaultInstance; + public static BytesBytesPair getDefaultInstance() { return defaultInstance; } - public NameBytesPair getDefaultInstanceForType() { + public BytesBytesPair getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameBytesPair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BytesBytesPair_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameBytesPair_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BytesBytesPair_fieldAccessorTable; } private int bitField0_; - // required string name = 1; - public static final int NAME_FIELD_NUMBER = 1; - private java.lang.Object name_; - public boolean hasName() { + // required bytes first = 1; + public static final int FIRST_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString first_; + public boolean hasFirst() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public String getName() { - java.lang.Object ref = name_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - name_ = s; - } - return s; - } - } - private com.google.protobuf.ByteString getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof String) { - com.google.protobuf.ByteString 
b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public com.google.protobuf.ByteString getFirst() { + return first_; } - // optional bytes value = 2; - public static final int VALUE_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString value_; - public boolean hasValue() { + // required bytes second = 2; + public static final int SECOND_FIELD_NUMBER = 2; + private com.google.protobuf.ByteString second_; + public boolean hasSecond() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public com.google.protobuf.ByteString getValue() { - return value_; + public com.google.protobuf.ByteString getSecond() { + return second_; } private void initFields() { - name_ = ""; - value_ = com.google.protobuf.ByteString.EMPTY; + first_ = com.google.protobuf.ByteString.EMPTY; + second_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasName()) { + if (!hasFirst()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSecond()) { memoizedIsInitialized = 0; return false; } @@ -6187,10 +7299,10 @@ public final class HBaseProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getNameBytes()); + output.writeBytes(1, first_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, value_); + output.writeBytes(2, second_); } getUnknownFields().writeTo(output); } @@ -6203,11 +7315,11 @@ public final class HBaseProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getNameBytes()); + .computeBytesSize(1, first_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, value_); + .computeBytesSize(2, second_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -6226,21 +7338,21 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair) obj; boolean result = true; - result = result && (hasName() == other.hasName()); - if (hasName()) { - result = result && getName() - .equals(other.getName()); + result = result && (hasFirst() == other.hasFirst()); + if (hasFirst()) { + result = result && getFirst() + .equals(other.getFirst()); } - result = result && (hasValue() == other.hasValue()); - if (hasValue()) { - result = result && getValue() - .equals(other.getValue()); + result = result && (hasSecond() == other.hasSecond()); + if (hasSecond()) { + result = result && getSecond() + .equals(other.getSecond()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -6251,53 +7363,53 @@ public final class HBaseProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if 
(hasName()) { - hash = (37 * hash) + NAME_FIELD_NUMBER; - hash = (53 * hash) + getName().hashCode(); + if (hasFirst()) { + hash = (37 * hash) + FIRST_FIELD_NUMBER; + hash = (53 * hash) + getFirst().hashCode(); } - if (hasValue()) { - hash = (37 * hash) + VALUE_FIELD_NUMBER; - hash = (53 * hash) + getValue().hashCode(); + if (hasSecond()) { + hash = (37 * hash) + SECOND_FIELD_NUMBER; + hash = (53 * hash) + getSecond().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -6306,7 +7418,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -6317,12 +7429,12 @@ public final class HBaseProtos { return null; } } - 
public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -6332,7 +7444,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -6345,18 +7457,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPairOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameBytesPair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BytesBytesPair_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameBytesPair_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BytesBytesPair_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -6375,9 +7487,9 @@ public final class HBaseProtos { public Builder clear() { super.clear(); - name_ = ""; + first_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); - value_ = com.google.protobuf.ByteString.EMPTY; + second_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -6388,24 +7500,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance(); } - public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -6413,46 +7525,50 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.name_ = name_; + result.first_ = first_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.value_ = value_; + result.second_ = second_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.getDefaultInstance()) return this; - if (other.hasName()) { - setName(other.getName()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()) return this; + if (other.hasFirst()) { + setFirst(other.getFirst()); } - if (other.hasValue()) { - setValue(other.getValue()); + if (other.hasSecond()) { + setSecond(other.getSecond()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasName()) { + if (!hasFirst()) { + + return false; + } + if (!hasSecond()) { return false; } @@ -6484,12 +7600,12 @@ public final class HBaseProtos { } case 10: { bitField0_ |= 0x00000001; - name_ = input.readBytes(); + first_ = input.readBytes(); break; } case 18: { 
bitField0_ |= 0x00000002; - value_ = input.readBytes(); + second_ = input.readBytes(); break; } } @@ -6498,154 +7614,156 @@ public final class HBaseProtos { private int bitField0_; - // required string name = 1; - private java.lang.Object name_ = ""; - public boolean hasName() { + // required bytes first = 1; + private com.google.protobuf.ByteString first_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasFirst() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public String getName() { - java.lang.Object ref = name_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - name_ = s; - return s; - } else { - return (String) ref; - } + public com.google.protobuf.ByteString getFirst() { + return first_; } - public Builder setName(String value) { + public Builder setFirst(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - name_ = value; + first_ = value; onChanged(); return this; } - public Builder clearName() { + public Builder clearFirst() { bitField0_ = (bitField0_ & ~0x00000001); - name_ = getDefaultInstance().getName(); + first_ = getDefaultInstance().getFirst(); onChanged(); return this; } - void setName(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - name_ = value; - onChanged(); - } - // optional bytes value = 2; - private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasValue() { + // required bytes second = 2; + private com.google.protobuf.ByteString second_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasSecond() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public com.google.protobuf.ByteString getValue() { - return value_; + public com.google.protobuf.ByteString getSecond() { + return second_; } - public Builder setValue(com.google.protobuf.ByteString value) { + public Builder setSecond(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000002; - value_ = value; + second_ = value; onChanged(); return this; } - public Builder clearValue() { + public Builder clearSecond() { bitField0_ = (bitField0_ & ~0x00000002); - value_ = getDefaultInstance().getValue(); + second_ = getDefaultInstance().getSecond(); onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:NameBytesPair) + // @@protoc_insertion_point(builder_scope:BytesBytesPair) } static { - defaultInstance = new NameBytesPair(true); + defaultInstance = new BytesBytesPair(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:NameBytesPair) + // @@protoc_insertion_point(class_scope:BytesBytesPair) } - public interface BytesBytesPairOrBuilder + public interface NameInt64PairOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bytes first = 1; - boolean hasFirst(); - com.google.protobuf.ByteString getFirst(); + // optional string name = 1; + boolean hasName(); + String getName(); - // required bytes second = 2; - boolean hasSecond(); - com.google.protobuf.ByteString getSecond(); + // optional int64 value = 2; + boolean hasValue(); + long getValue(); } - public static final class BytesBytesPair extends + public static final class NameInt64Pair extends com.google.protobuf.GeneratedMessage - implements BytesBytesPairOrBuilder { - // Use BytesBytesPair.newBuilder() to construct. 
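// --- Editorial sketch, not part of the patch ---
// A minimal usage example for the renamed BytesBytesPair message, assuming only the generated
// builder/parse API visible in the diff above (newBuilder, setFirst, setSecond, build, parseFrom).
// The sample values and the class name BytesBytesPairExample are invented for illustration.
import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;

public class BytesBytesPairExample {
  public static void main(String[] args) throws Exception {
    // Both fields are "required bytes", so build() only succeeds once first and second are set.
    BytesBytesPair pair = BytesBytesPair.newBuilder()
        .setFirst(ByteString.copyFromUtf8("row-start"))
        .setSecond(ByteString.copyFromUtf8("row-end"))
        .build();

    // Round-trip through the wire format using the generated static parseFrom(byte[]).
    BytesBytesPair copy = BytesBytesPair.parseFrom(pair.toByteArray());
    assert copy.hasFirst() && copy.getFirst().equals(pair.getFirst());
    assert copy.hasSecond() && copy.getSecond().equals(pair.getSecond());
  }
}
// --- End of editorial sketch ---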
- private BytesBytesPair(Builder builder) { + implements NameInt64PairOrBuilder { + // Use NameInt64Pair.newBuilder() to construct. + private NameInt64Pair(Builder builder) { super(builder); } - private BytesBytesPair(boolean noInit) {} + private NameInt64Pair(boolean noInit) {} - private static final BytesBytesPair defaultInstance; - public static BytesBytesPair getDefaultInstance() { + private static final NameInt64Pair defaultInstance; + public static NameInt64Pair getDefaultInstance() { return defaultInstance; } - public BytesBytesPair getDefaultInstanceForType() { + public NameInt64Pair getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BytesBytesPair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameInt64Pair_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BytesBytesPair_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameInt64Pair_fieldAccessorTable; } private int bitField0_; - // required bytes first = 1; - public static final int FIRST_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString first_; - public boolean hasFirst() { + // optional string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getFirst() { - return first_; + public String getName() { + java.lang.Object ref = name_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + name_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } - // required bytes second = 2; - public static final int SECOND_FIELD_NUMBER = 2; - private com.google.protobuf.ByteString second_; - public boolean hasSecond() { + // optional int64 value = 2; + public static final int VALUE_FIELD_NUMBER = 2; + private long value_; + public boolean hasValue() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public com.google.protobuf.ByteString getSecond() { - return second_; + public long getValue() { + return value_; } private void initFields() { - first_ = com.google.protobuf.ByteString.EMPTY; - second_ = com.google.protobuf.ByteString.EMPTY; + name_ = ""; + value_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasFirst()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasSecond()) { - memoizedIsInitialized = 0; - return false; - } memoizedIsInitialized = 1; return true; } @@ -6654,10 +7772,10 @@ public final class HBaseProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, first_); + 
output.writeBytes(1, getNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, second_); + output.writeInt64(2, value_); } getUnknownFields().writeTo(output); } @@ -6670,11 +7788,11 @@ public final class HBaseProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, first_); + .computeBytesSize(1, getNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, second_); + .computeInt64Size(2, value_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -6693,21 +7811,21 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair) obj; boolean result = true; - result = result && (hasFirst() == other.hasFirst()); - if (hasFirst()) { - result = result && getFirst() - .equals(other.getFirst()); + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); } - result = result && (hasSecond() == other.hasSecond()); - if (hasSecond()) { - result = result && getSecond() - .equals(other.getSecond()); + result = result && (hasValue() == other.hasValue()); + if (hasValue()) { + result = result && (getValue() + == other.getValue()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -6718,53 +7836,53 @@ public final class HBaseProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasFirst()) { - hash = (37 * hash) + FIRST_FIELD_NUMBER; - hash = (53 * hash) + getFirst().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); } - if (hasSecond()) { - hash = (37 * hash) + SECOND_FIELD_NUMBER; - hash = (53 * hash) + getSecond().hashCode(); + if (hasValue()) { + hash = (37 * hash) + VALUE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getValue()); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair 
parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -6773,7 +7891,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -6784,12 +7902,12 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -6799,7 +7917,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -6812,18 +7930,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPairOrBuilder { + implements 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64PairOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BytesBytesPair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameInt64Pair_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BytesBytesPair_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameInt64Pair_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -6842,9 +7960,9 @@ public final class HBaseProtos { public Builder clear() { super.clear(); - first_ = com.google.protobuf.ByteString.EMPTY; + name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - second_ = com.google.protobuf.ByteString.EMPTY; + value_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -6855,24 +7973,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -6880,53 +7998,45 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair buildPartial() { + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.first_ = first_; + result.name_ = name_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.second_ = second_; + result.value_ = value_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.getDefaultInstance()) return this; - if (other.hasFirst()) { - setFirst(other.getFirst()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); } - if (other.hasSecond()) { - setSecond(other.getSecond()); + if (other.hasValue()) { + setValue(other.getValue()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasFirst()) { - - return false; - } - if (!hasSecond()) { - - return false; - } return true; } @@ -6955,12 +8065,12 @@ public final class HBaseProtos { } case 10: { bitField0_ |= 0x00000001; - first_ = input.readBytes(); + name_ = input.readBytes(); break; } - case 18: { + case 16: { bitField0_ |= 0x00000002; - second_ = input.readBytes(); + value_ = input.readInt64(); break; } } @@ -6969,106 +8079,196 @@ public final class HBaseProtos { private int bitField0_; - // required bytes first = 1; - private com.google.protobuf.ByteString first_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasFirst() { + // optional string name = 1; + private java.lang.Object name_ = ""; + public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getFirst() { - return first_; + public String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + name_ = s; + return s; + } else { + return (String) ref; + } } - public Builder setFirst(com.google.protobuf.ByteString value) { + public Builder setName(String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - first_ = value; + name_ = value; onChanged(); return this; } - public Builder clearFirst() { + public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000001); - first_ = getDefaultInstance().getFirst(); + name_ = getDefaultInstance().getName(); onChanged(); return this; } + void setName(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + } - // required bytes second = 2; - private 
com.google.protobuf.ByteString second_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasSecond() { + // optional int64 value = 2; + private long value_ ; + public boolean hasValue() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public com.google.protobuf.ByteString getSecond() { - return second_; + public long getValue() { + return value_; } - public Builder setSecond(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - second_ = value; + public Builder setValue(long value) { + bitField0_ |= 0x00000002; + value_ = value; onChanged(); return this; } - public Builder clearSecond() { + public Builder clearValue() { bitField0_ = (bitField0_ & ~0x00000002); - second_ = getDefaultInstance().getSecond(); + value_ = 0L; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:BytesBytesPair) - } - - static { - defaultInstance = new BytesBytesPair(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:BytesBytesPair) - } - - public interface NameInt64PairOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional string name = 1; - boolean hasName(); - String getName(); - - // optional int64 value = 2; - boolean hasValue(); - long getValue(); - } - public static final class NameInt64Pair extends - com.google.protobuf.GeneratedMessage - implements NameInt64PairOrBuilder { - // Use NameInt64Pair.newBuilder() to construct. - private NameInt64Pair(Builder builder) { - super(builder); - } - private NameInt64Pair(boolean noInit) {} - - private static final NameInt64Pair defaultInstance; - public static NameInt64Pair getDefaultInstance() { - return defaultInstance; - } - - public NameInt64Pair getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameInt64Pair_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameInt64Pair_fieldAccessorTable; + // @@protoc_insertion_point(builder_scope:NameInt64Pair) + } + + static { + defaultInstance = new NameInt64Pair(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:NameInt64Pair) + } + + public interface SnapshotDescriptionOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + boolean hasName(); + String getName(); + + // optional string table = 2; + boolean hasTable(); + String getTable(); + + // optional int64 creation_time = 3 [default = 0]; + boolean hasCreationTime(); + long getCreationTime(); + + // optional .SnapshotDescription.Type type = 4 [default = FLUSH]; + boolean hasType(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType(); + + // optional int32 version = 5; + boolean hasVersion(); + int getVersion(); + } + public static final class SnapshotDescription extends + com.google.protobuf.GeneratedMessage + implements SnapshotDescriptionOrBuilder { + // Use SnapshotDescription.newBuilder() to construct. 
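// --- Editorial sketch, not part of the patch ---
// Usage example for the new NameInt64Pair message (a string key paired with a 64-bit value),
// based on the generated accessors shown above; since both fields are optional, an empty
// instance is already initialized. The metric name and value below are made up.
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair;

public class NameInt64PairExample {
  public static void main(String[] args) throws Exception {
    NameInt64Pair counter = NameInt64Pair.newBuilder()
        .setName("storefileCount")   // optional string name = 1
        .setValue(42L)               // optional int64 value = 2
        .build();

    NameInt64Pair copy = NameInt64Pair.parseFrom(counter.toByteArray());
    assert copy.hasName() && "storefileCount".equals(copy.getName());
    assert copy.hasValue() && copy.getValue() == 42L;
  }
}
// --- End of editorial sketch ---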
+ private SnapshotDescription(Builder builder) { + super(builder); + } + private SnapshotDescription(boolean noInit) {} + + private static final SnapshotDescription defaultInstance; + public static SnapshotDescription getDefaultInstance() { + return defaultInstance; + } + + public SnapshotDescription getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_fieldAccessorTable; + } + + public enum Type + implements com.google.protobuf.ProtocolMessageEnum { + DISABLED(0, 0), + FLUSH(1, 1), + ; + + public static final int DISABLED_VALUE = 0; + public static final int FLUSH_VALUE = 1; + + + public final int getNumber() { return value; } + + public static Type valueOf(int value) { + switch (value) { + case 0: return DISABLED; + case 1: return FLUSH; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDescriptor().getEnumTypes().get(0); + } + + private static final Type[] VALUES = { + DISABLED, FLUSH, + }; + + public static Type valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private Type(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:SnapshotDescription.Type) } private int bitField0_; - // optional string name = 1; + // required string name = 1; public static final int NAME_FIELD_NUMBER = 1; private java.lang.Object name_; public boolean hasName() { @@ -7100,25 +8300,84 @@ public final class HBaseProtos { } } - // optional int64 value = 2; - public static final int VALUE_FIELD_NUMBER = 2; - private long value_; - public boolean hasValue() { + // optional string table = 2; + public static final int TABLE_FIELD_NUMBER = 2; + private java.lang.Object table_; + public boolean hasTable() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public long getValue() { - return value_; + public String getTable() { + java.lang.Object ref = table_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + table_ = s; + } + return s; + } + } + private 
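// --- Editorial sketch, not part of the patch ---
// Behavior of the generated SnapshotDescription.Type enum as defined above: valueOf(int) maps a
// wire number back to its constant and returns null for numbers this client build does not know,
// which lets the generated parser keep unrecognized values in the message's unknown fields
// instead of failing. The class name SnapshotTypeExample is invented for illustration.
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

public class SnapshotTypeExample {
  public static void main(String[] args) {
    assert SnapshotDescription.Type.valueOf(0) == SnapshotDescription.Type.DISABLED;
    assert SnapshotDescription.Type.valueOf(1) == SnapshotDescription.Type.FLUSH;
    assert SnapshotDescription.Type.valueOf(2) == null; // unrecognized wire value
    assert SnapshotDescription.Type.FLUSH.getNumber() == 1;
  }
}
// --- End of editorial sketch ---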
com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int64 creation_time = 3 [default = 0]; + public static final int CREATION_TIME_FIELD_NUMBER = 3; + private long creationTime_; + public boolean hasCreationTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + public long getCreationTime() { + return creationTime_; + } + + // optional .SnapshotDescription.Type type = 4 [default = FLUSH]; + public static final int TYPE_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type type_; + public boolean hasType() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType() { + return type_; + } + + // optional int32 version = 5; + public static final int VERSION_FIELD_NUMBER = 5; + private int version_; + public boolean hasVersion() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + public int getVersion() { + return version_; } private void initFields() { name_ = ""; - value_ = 0L; + table_ = ""; + creationTime_ = 0L; + type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; + version_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -7130,7 +8389,16 @@ public final class HBaseProtos { output.writeBytes(1, getNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeInt64(2, value_); + output.writeBytes(2, getTableBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(3, creationTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeEnum(4, type_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeInt32(5, version_); } getUnknownFields().writeTo(output); } @@ -7147,7 +8415,19 @@ public final class HBaseProtos { } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeInt64Size(2, value_); + .computeBytesSize(2, getTableBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(3, creationTime_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(4, type_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeInt32Size(5, version_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -7166,10 +8446,10 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair) obj; + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription) obj; boolean result = true; result = result && (hasName() == other.hasName()); @@ -7177,10 +8457,25 @@ public final class HBaseProtos { result = result && getName() .equals(other.getName()); } - result = result && (hasValue() == other.hasValue()); - if (hasValue()) { - result = result && (getValue() - == other.getValue()); + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); + } + result = result && (hasCreationTime() == other.hasCreationTime()); + if (hasCreationTime()) { + result = result && (getCreationTime() + == other.getCreationTime()); + } + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); + } + result = result && (hasVersion() == other.hasVersion()); + if (hasVersion()) { + result = result && (getVersion() + == other.getVersion()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -7195,49 +8490,61 @@ public final class HBaseProtos { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } - if (hasValue()) { - hash = (37 * hash) + VALUE_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getValue()); + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + } + if (hasCreationTime()) { + hash = (37 * hash) + CREATION_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCreationTime()); + } + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); + } + if (hasVersion()) { + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion(); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom(java.io.InputStream 
input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -7246,7 +8553,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -7257,12 +8564,12 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -7272,7 +8579,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -7285,18 +8592,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64PairOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameInt64Pair_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NameInt64Pair_fieldAccessorTable; + return 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -7317,8 +8624,14 @@ public final class HBaseProtos { super.clear(); name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); - value_ = 0L; + table_ = ""; bitField0_ = (bitField0_ & ~0x00000002); + creationTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; + bitField0_ = (bitField0_ & ~0x00000008); + version_ = 0; + bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -7328,24 +8641,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -7353,8 +8666,8 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { @@ -7364,34 +8677,59 @@ public final class HBaseProtos { if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - result.value_ = value_; + result.table_ 
= table_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.creationTime_ = creationTime_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.version_ = version_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance()) return this; if (other.hasName()) { setName(other.getName()); } - if (other.hasValue()) { - setValue(other.getValue()); + if (other.hasTable()) { + setTable(other.getTable()); + } + if (other.hasCreationTime()) { + setCreationTime(other.getCreationTime()); + } + if (other.hasType()) { + setType(other.getType()); + } + if (other.hasVersion()) { + setVersion(other.getVersion()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + if (!hasName()) { + + return false; + } return true; } @@ -7423,9 +8761,30 @@ public final class HBaseProtos { name_ = input.readBytes(); break; } - case 16: { + case 18: { bitField0_ |= 0x00000002; - value_ = input.readInt64(); + table_ = input.readBytes(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + creationTime_ = input.readInt64(); + break; + } + case 32: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(4, rawValue); + } else { + bitField0_ |= 0x00000008; + type_ = value; + } + break; + } + case 40: { + bitField0_ |= 0x00000010; + version_ = input.readInt32(); break; } } @@ -7434,7 +8793,7 @@ public final class HBaseProtos { private int bitField0_; - // optional string name = 1; + // required string name = 1; private java.lang.Object name_ = ""; public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); @@ -7470,269 +8829,157 @@ public final class HBaseProtos { onChanged(); } - // optional int64 value = 2; - private long value_ ; - public boolean hasValue() { + // optional string table = 2; + private java.lang.Object table_ = ""; + public boolean hasTable() { return ((bitField0_ & 0x00000002) == 0x00000002); } - public long getValue() { - return value_; + public String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + table_ = s; + return s; + } else { + return (String) ref; + } } - 
public Builder setValue(long value) { - bitField0_ |= 0x00000002; - value_ = value; + public Builder setTable(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + table_ = value; onChanged(); return this; } - public Builder clearValue() { + public Builder clearTable() { bitField0_ = (bitField0_ & ~0x00000002); - value_ = 0L; + table_ = getDefaultInstance().getTable(); onChanged(); return this; } - - // @@protoc_insertion_point(builder_scope:NameInt64Pair) - } - - static { - defaultInstance = new NameInt64Pair(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:NameInt64Pair) - } - - public interface SnapshotDescriptionOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required string name = 1; - boolean hasName(); - String getName(); - - // optional string table = 2; - boolean hasTable(); - String getTable(); - - // optional int64 creation_time = 3 [default = 0]; - boolean hasCreationTime(); - long getCreationTime(); - - // optional .SnapshotDescription.Type type = 4 [default = FLUSH]; - boolean hasType(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType(); - - // optional int32 version = 5; - boolean hasVersion(); - int getVersion(); - } - public static final class SnapshotDescription extends - com.google.protobuf.GeneratedMessage - implements SnapshotDescriptionOrBuilder { - // Use SnapshotDescription.newBuilder() to construct. - private SnapshotDescription(Builder builder) { - super(builder); - } - private SnapshotDescription(boolean noInit) {} - - private static final SnapshotDescription defaultInstance; - public static SnapshotDescription getDefaultInstance() { - return defaultInstance; - } - - public SnapshotDescription getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_fieldAccessorTable; - } - - public enum Type - implements com.google.protobuf.ProtocolMessageEnum { - DISABLED(0, 0), - FLUSH(1, 1), - ; - - public static final int DISABLED_VALUE = 0; - public static final int FLUSH_VALUE = 1; - - - public final int getNumber() { return value; } - - public static Type valueOf(int value) { - switch (value) { - case 0: return DISABLED; - case 1: return FLUSH; - default: return null; - } + void setTable(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000002; + table_ = value; + onChanged(); } - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; + // optional int64 creation_time = 3 [default = 0]; + private long creationTime_ ; + public boolean hasCreationTime() { + return ((bitField0_ & 0x00000004) == 0x00000004); } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public Type findValueByNumber(int number) { - return Type.valueOf(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); + public long getCreationTime() { + return creationTime_; } - public final 
com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); + public Builder setCreationTime(long value) { + bitField0_ |= 0x00000004; + creationTime_ = value; + onChanged(); + return this; } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDescriptor().getEnumTypes().get(0); + public Builder clearCreationTime() { + bitField0_ = (bitField0_ & ~0x00000004); + creationTime_ = 0L; + onChanged(); + return this; } - private static final Type[] VALUES = { - DISABLED, FLUSH, - }; - - public static Type valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); + // optional .SnapshotDescription.Type type = 4 [default = FLUSH]; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; + public boolean hasType() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType() { + return type_; + } + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type value) { + if (value == null) { + throw new NullPointerException(); } - return VALUES[desc.getIndex()]; + bitField0_ |= 0x00000008; + type_ = value; + onChanged(); + return this; } - - private final int index; - private final int value; - - private Type(int index, int value) { - this.index = index; - this.value = value; + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000008); + type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; + onChanged(); + return this; } - // @@protoc_insertion_point(enum_scope:SnapshotDescription.Type) - } - - private int bitField0_; - // required string name = 1; - public static final int NAME_FIELD_NUMBER = 1; - private java.lang.Object name_; - public boolean hasName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getName() { - java.lang.Object ref = name_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if (com.google.protobuf.Internal.isValidUtf8(bs)) { - name_ = s; - } - return s; + // optional int32 version = 5; + private int version_ ; + public boolean hasVersion() { + return ((bitField0_ & 0x00000010) == 0x00000010); } - } - private com.google.protobuf.ByteString getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + public int getVersion() { + return version_; } - } - - // optional string table = 2; - public static final int TABLE_FIELD_NUMBER = 2; - private java.lang.Object table_; - public boolean hasTable() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getTable() { - java.lang.Object ref = table_; - if (ref instanceof String) { - return (String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - String s = bs.toStringUtf8(); - if 
(com.google.protobuf.Internal.isValidUtf8(bs)) { - table_ = s; - } - return s; + public Builder setVersion(int value) { + bitField0_ |= 0x00000010; + version_ = value; + onChanged(); + return this; } - } - private com.google.protobuf.ByteString getTableBytes() { - java.lang.Object ref = table_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((String) ref); - table_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; + public Builder clearVersion() { + bitField0_ = (bitField0_ & ~0x00000010); + version_ = 0; + onChanged(); + return this; } + + // @@protoc_insertion_point(builder_scope:SnapshotDescription) } - // optional int64 creation_time = 3 [default = 0]; - public static final int CREATION_TIME_FIELD_NUMBER = 3; - private long creationTime_; - public boolean hasCreationTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); + static { + defaultInstance = new SnapshotDescription(true); + defaultInstance.initFields(); } - public long getCreationTime() { - return creationTime_; + + // @@protoc_insertion_point(class_scope:SnapshotDescription) + } + + public interface EmptyMsgOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class EmptyMsg extends + com.google.protobuf.GeneratedMessage + implements EmptyMsgOrBuilder { + // Use EmptyMsg.newBuilder() to construct. + private EmptyMsg(Builder builder) { + super(builder); } + private EmptyMsg(boolean noInit) {} - // optional .SnapshotDescription.Type type = 4 [default = FLUSH]; - public static final int TYPE_FIELD_NUMBER = 4; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type type_; - public boolean hasType() { - return ((bitField0_ & 0x00000008) == 0x00000008); + private static final EmptyMsg defaultInstance; + public static EmptyMsg getDefaultInstance() { + return defaultInstance; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType() { - return type_; + + public EmptyMsg getDefaultInstanceForType() { + return defaultInstance; } - // optional int32 version = 5; - public static final int VERSION_FIELD_NUMBER = 5; - private int version_; - public boolean hasVersion() { - return ((bitField0_ & 0x00000010) == 0x00000010); + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_EmptyMsg_descriptor; } - public int getVersion() { - return version_; + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_EmptyMsg_fieldAccessorTable; } private void initFields() { - name_ = ""; - table_ = ""; - creationTime_ = 0L; - type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; - version_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasName()) { - memoizedIsInitialized = 0; - return false; - } memoizedIsInitialized = 1; return true; } @@ -7740,21 +8987,6 @@ public final class HBaseProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getNameBytes()); - } - if (((bitField0_ & 0x00000002) == 
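// --- Editorial sketch, not part of the patch ---
// Building the new SnapshotDescription message with the generated builder shown above. Only
// "name" is required; table, creation_time (default 0), type (default FLUSH) and version are
// optional. The snapshot and table names below are invented for the example.
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

public class SnapshotDescriptionExample {
  public static void main(String[] args) throws Exception {
    SnapshotDescription snapshot = SnapshotDescription.newBuilder()
        .setName("demo-snapshot")
        .setTable("demo-table")
        .setCreationTime(System.currentTimeMillis())
        .setType(SnapshotDescription.Type.FLUSH)
        .build();

    SnapshotDescription copy = SnapshotDescription.parseFrom(snapshot.toByteArray());
    assert copy.hasName() && copy.hasTable();
    assert copy.getType() == SnapshotDescription.Type.FLUSH;
    assert !copy.hasVersion() && copy.getVersion() == 0; // unset optional int32 falls back to 0
  }
}
// --- End of editorial sketch ---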
0x00000002)) { - output.writeBytes(2, getTableBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeInt64(3, creationTime_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeEnum(4, type_.getNumber()); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeInt32(5, version_); - } getUnknownFields().writeTo(output); } @@ -7764,26 +8996,6 @@ public final class HBaseProtos { if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getNameBytes()); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getTableBytes()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(3, creationTime_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(4, type_.getNumber()); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(5, version_); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -7801,37 +9013,12 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg) obj; boolean result = true; - result = result && (hasName() == other.hasName()); - if (hasName()) { - result = result && getName() - .equals(other.getName()); - } - result = result && (hasTable() == other.hasTable()); - if (hasTable()) { - result = result && getTable() - .equals(other.getTable()); - } - result = result && (hasCreationTime() == other.hasCreationTime()); - if (hasCreationTime()) { - result = result && (getCreationTime() - == other.getCreationTime()); - } - result = result && (hasType() == other.hasType()); - if (hasType()) { - result = result && - (getType() == other.getType()); - } - result = result && (hasVersion() == other.hasVersion()); - if (hasVersion()) { - result = result && (getVersion() - == other.getVersion()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -7841,65 +9028,45 @@ public final class HBaseProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasName()) { - hash = (37 * hash) + NAME_FIELD_NUMBER; - hash = (53 * hash) + getName().hashCode(); - } - if (hasTable()) { - hash = (37 * hash) + TABLE_FIELD_NUMBER; - hash = (53 * hash) + getTable().hashCode(); - } - if (hasCreationTime()) { - hash = (37 * hash) + CREATION_TIME_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCreationTime()); - } - if (hasType()) { - hash = (37 * hash) + TYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getType()); - } - if (hasVersion()) { - hash = (37 * hash) + VERSION_FIELD_NUMBER; - hash = (53 * hash) + getVersion(); - } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -7908,7 +9075,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -7919,12 +9086,12 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom( 
com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -7934,7 +9101,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -7947,18 +9114,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsgOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_EmptyMsg_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_SnapshotDescription_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_EmptyMsg_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -7977,364 +9144,162 @@ public final class HBaseProtos { public Builder clear() { super.clear(); - name_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - table_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - creationTime_ = 0L; - bitField0_ = (bitField0_ & ~0x00000004); - type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; - bitField0_ = (bitField0_ & ~0x00000008); - version_ = 0; - bitField0_ = (bitField0_ & ~0x00000010); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDescriptor(); - } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } 
- return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.name_ = name_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.table_ = table_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.creationTime_ = creationTime_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.type_ = type_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.version_ = version_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance()) return this; - if (other.hasName()) { - setName(other.getName()); - } - if (other.hasTable()) { - setTable(other.getTable()); - } - if (other.hasCreationTime()) { - setCreationTime(other.getCreationTime()); - } - if (other.hasType()) { - setType(other.getType()); - } - if (other.hasVersion()) { - setVersion(other.getVersion()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasName()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - name_ = input.readBytes(); - break; - } - case 18: { - bitField0_ |= 0x00000002; - table_ = input.readBytes(); - break; - } - case 24: { - bitField0_ |= 0x00000004; - creationTime_ = input.readInt64(); - break; - } - case 32: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(4, rawValue); - } else { - bitField0_ |= 0x00000008; - type_ = value; - } - break; - } - case 40: { - bitField0_ |= 0x00000010; - version_ = input.readInt32(); - break; - } - } - } - } - - private int bitField0_; - - // required string name = 1; - private 
java.lang.Object name_ = ""; - public boolean hasName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public String getName() { - java.lang.Object ref = name_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - name_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setName(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - name_ = value; - onChanged(); - return this; - } - public Builder clearName() { - bitField0_ = (bitField0_ & ~0x00000001); - name_ = getDefaultInstance().getName(); - onChanged(); - return this; - } - void setName(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000001; - name_ = value; - onChanged(); - } - - // optional string table = 2; - private java.lang.Object table_ = ""; - public boolean hasTable() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public String getTable() { - java.lang.Object ref = table_; - if (!(ref instanceof String)) { - String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); - table_ = s; - return s; - } else { - return (String) ref; - } - } - public Builder setTable(String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - table_ = value; - onChanged(); - return this; - } - public Builder clearTable() { - bitField0_ = (bitField0_ & ~0x00000002); - table_ = getDefaultInstance().getTable(); - onChanged(); - return this; - } - void setTable(com.google.protobuf.ByteString value) { - bitField0_ |= 0x00000002; - table_ = value; - onChanged(); - } - - // optional int64 creation_time = 3 [default = 0]; - private long creationTime_ ; - public boolean hasCreationTime() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - public long getCreationTime() { - return creationTime_; - } - public Builder setCreationTime(long value) { - bitField0_ |= 0x00000004; - creationTime_ = value; - onChanged(); return this; } - public Builder clearCreationTime() { - bitField0_ = (bitField0_ & ~0x00000004); - creationTime_ = 0L; - onChanged(); - return this; + + public Builder clone() { + return create().mergeFrom(buildPartial()); } - // optional .SnapshotDescription.Type type = 4 [default = FLUSH]; - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; - public boolean hasType() { - return ((bitField0_ & 0x00000008) == 0x00000008); + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type getType() { - return type_; + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg.getDefaultInstance(); } - public Builder setType(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type value) { - if (value == null) { - throw new NullPointerException(); + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); } - bitField0_ |= 0x00000008; - type_ = value; - onChanged(); - return 
this; + return result; } - public Builder clearType() { - bitField0_ = (bitField0_ & ~0x00000008); - type_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type.FLUSH; - onChanged(); - return this; + + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; } - // optional int32 version = 5; - private int version_ ; - public boolean hasVersion() { - return ((bitField0_ & 0x00000010) == 0x00000010); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg(this); + onBuilt(); + return result; } - public int getVersion() { - return version_; + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg)other); + } else { + super.mergeFrom(other); + return this; + } } - public Builder setVersion(int value) { - bitField0_ |= 0x00000010; - version_ = value; - onChanged(); + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); return this; } - public Builder clearVersion() { - bitField0_ = (bitField0_ & ~0x00000010); - version_ = 0; - onChanged(); - return this; + + public final boolean isInitialized() { + return true; } - // @@protoc_insertion_point(builder_scope:SnapshotDescription) + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:EmptyMsg) } static { - defaultInstance = new SnapshotDescription(true); + defaultInstance = new EmptyMsg(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:SnapshotDescription) + // @@protoc_insertion_point(class_scope:EmptyMsg) } - public interface EmptyMsgOrBuilder + public interface LongMsgOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // required int64 long_msg = 1; + boolean hasLongMsg(); + long getLongMsg(); } - public static final class EmptyMsg extends + public static final class LongMsg extends com.google.protobuf.GeneratedMessage - implements EmptyMsgOrBuilder { - // Use EmptyMsg.newBuilder() to construct. - private EmptyMsg(Builder builder) { + implements LongMsgOrBuilder { + // Use LongMsg.newBuilder() to construct. 
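// Illustrative sketch, not from the generated source or this patch: the new LongMsg
// message carries a single required int64 field (long_msg = 1). Using only the
// builder, accessor, and parseFrom(byte[]) methods generated in this hunk, a value
// round-trips through the wire format like so:
//
//   HBaseProtos.LongMsg msg = HBaseProtos.LongMsg.newBuilder()
//       .setLongMsg(42L)
//       .build();
//   byte[] wire = msg.toByteArray();            // inherited from GeneratedMessage
//   long value = HBaseProtos.LongMsg.parseFrom(wire).getLongMsg();   // value == 42L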
+ private LongMsg(Builder builder) { super(builder); } - private EmptyMsg(boolean noInit) {} + private LongMsg(boolean noInit) {} - private static final EmptyMsg defaultInstance; - public static EmptyMsg getDefaultInstance() { + private static final LongMsg defaultInstance; + public static LongMsg getDefaultInstance() { return defaultInstance; } - public EmptyMsg getDefaultInstanceForType() { + public LongMsg getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_EmptyMsg_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_LongMsg_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_EmptyMsg_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_LongMsg_fieldAccessorTable; + } + + private int bitField0_; + // required int64 long_msg = 1; + public static final int LONG_MSG_FIELD_NUMBER = 1; + private long longMsg_; + public boolean hasLongMsg() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getLongMsg() { + return longMsg_; } private void initFields() { + longMsg_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (!hasLongMsg()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -8342,6 +9307,9 @@ public final class HBaseProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeInt64(1, longMsg_); + } getUnknownFields().writeTo(output); } @@ -8351,6 +9319,10 @@ public final class HBaseProtos { if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(1, longMsg_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -8368,12 +9340,17 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg) obj; boolean result = true; + result = result && (hasLongMsg() == other.hasLongMsg()); + if (hasLongMsg()) { + result = result && (getLongMsg() + == other.getLongMsg()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -8383,45 +9360,49 @@ public final class HBaseProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasLongMsg()) { + hash = (37 * hash) + LONG_MSG_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLongMsg()); + } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -8430,7 +9411,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -8441,12 +9422,12 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { @@ -8456,7 +9437,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -8469,18 +9450,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsgOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsgOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_EmptyMsg_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_LongMsg_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_EmptyMsg_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_LongMsg_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -8499,6 +9480,8 @@ public final class HBaseProtos { public Builder clear() { super.clear(); + longMsg_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -8508,24 +9491,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); 
@@ -8533,28 +9516,42 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.longMsg_ = longMsg_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg.getDefaultInstance()) return this; + if (other.hasLongMsg()) { + setLongMsg(other.getLongMsg()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + if (!hasLongMsg()) { + + return false; + } return true; } @@ -8581,77 +9578,104 @@ public final class HBaseProtos { } break; } + case 8: { + bitField0_ |= 0x00000001; + longMsg_ = input.readInt64(); + break; + } } } } + private int bitField0_; + + // required int64 long_msg = 1; + private long longMsg_ ; + public boolean hasLongMsg() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public long getLongMsg() { + return longMsg_; + } + public Builder setLongMsg(long value) { + bitField0_ |= 0x00000001; + longMsg_ = value; + onChanged(); + return this; + } + public Builder clearLongMsg() { + bitField0_ = (bitField0_ & ~0x00000001); + longMsg_ = 0L; + onChanged(); + return this; + } - // @@protoc_insertion_point(builder_scope:EmptyMsg) + // @@protoc_insertion_point(builder_scope:LongMsg) } static { - defaultInstance = new EmptyMsg(true); + defaultInstance = new LongMsg(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:EmptyMsg) + // @@protoc_insertion_point(class_scope:LongMsg) } - public interface LongMsgOrBuilder + public interface BigDecimalMsgOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required int64 long_msg = 1; - boolean hasLongMsg(); - long getLongMsg(); + // required bytes bigdecimal_msg = 1; + boolean hasBigdecimalMsg(); + com.google.protobuf.ByteString getBigdecimalMsg(); } - public static final class LongMsg extends + public static final class BigDecimalMsg extends com.google.protobuf.GeneratedMessage - implements LongMsgOrBuilder { - // Use LongMsg.newBuilder() to construct. 
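// Illustrative sketch, not from the generated source or this patch: BigDecimalMsg
// stores a required bytes field (bigdecimal_msg = 1). One plausible way to wrap a
// java.math.BigDecimal is to serialize it to bytes first; this sketch assumes the
// HBase helper org.apache.hadoop.hbase.util.Bytes#toBytes(BigDecimal) is available:
//
//   java.math.BigDecimal bd = new java.math.BigDecimal("3.14");
//   HBaseProtos.BigDecimalMsg msg = HBaseProtos.BigDecimalMsg.newBuilder()
//       .setBigdecimalMsg(com.google.protobuf.ByteString.copyFrom(Bytes.toBytes(bd)))
//       .build();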
- private LongMsg(Builder builder) { + implements BigDecimalMsgOrBuilder { + // Use BigDecimalMsg.newBuilder() to construct. + private BigDecimalMsg(Builder builder) { super(builder); } - private LongMsg(boolean noInit) {} + private BigDecimalMsg(boolean noInit) {} - private static final LongMsg defaultInstance; - public static LongMsg getDefaultInstance() { + private static final BigDecimalMsg defaultInstance; + public static BigDecimalMsg getDefaultInstance() { return defaultInstance; } - public LongMsg getDefaultInstanceForType() { + public BigDecimalMsg getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_LongMsg_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BigDecimalMsg_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_LongMsg_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BigDecimalMsg_fieldAccessorTable; } private int bitField0_; - // required int64 long_msg = 1; - public static final int LONG_MSG_FIELD_NUMBER = 1; - private long longMsg_; - public boolean hasLongMsg() { + // required bytes bigdecimal_msg = 1; + public static final int BIGDECIMAL_MSG_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString bigdecimalMsg_; + public boolean hasBigdecimalMsg() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public long getLongMsg() { - return longMsg_; + public com.google.protobuf.ByteString getBigdecimalMsg() { + return bigdecimalMsg_; } private void initFields() { - longMsg_ = 0L; + bigdecimalMsg_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasLongMsg()) { + if (!hasBigdecimalMsg()) { memoizedIsInitialized = 0; return false; } @@ -8663,7 +9687,7 @@ public final class HBaseProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeInt64(1, longMsg_); + output.writeBytes(1, bigdecimalMsg_); } getUnknownFields().writeTo(output); } @@ -8676,7 +9700,7 @@ public final class HBaseProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeInt64Size(1, longMsg_); + .computeBytesSize(1, bigdecimalMsg_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -8695,16 +9719,16 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg) obj; boolean result = true; - result = result && (hasLongMsg() == other.hasLongMsg()); - if (hasLongMsg()) { - result = result && (getLongMsg() - == other.getLongMsg()); + result = result && (hasBigdecimalMsg() == 
other.hasBigdecimalMsg()); + if (hasBigdecimalMsg()) { + result = result && getBigdecimalMsg() + .equals(other.getBigdecimalMsg()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -8715,49 +9739,49 @@ public final class HBaseProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasLongMsg()) { - hash = (37 * hash) + LONG_MSG_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getLongMsg()); + if (hasBigdecimalMsg()) { + hash = (37 * hash) + BIGDECIMAL_MSG_FIELD_NUMBER; + hash = (53 * hash) + getBigdecimalMsg().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -8766,7 +9790,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws java.io.IOException { @@ -8777,12 +9801,12 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -8792,7 +9816,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -8805,18 +9829,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsgOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsgOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_LongMsg_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BigDecimalMsg_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_LongMsg_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BigDecimalMsg_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -8835,7 +9859,7 @@ public final class HBaseProtos { public Builder clear() { super.clear(); - longMsg_ = 0L; + bigdecimalMsg_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -8846,24 +9870,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg 
result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -8871,39 +9895,39 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.longMsg_ = longMsg_; + result.bigdecimalMsg_ = bigdecimalMsg_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg.getDefaultInstance()) return this; - if (other.hasLongMsg()) { - setLongMsg(other.getLongMsg()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg.getDefaultInstance()) return this; + if (other.hasBigdecimalMsg()) { + setBigdecimalMsg(other.getBigdecimalMsg()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasLongMsg()) { + if (!hasBigdecimalMsg()) { return false; } @@ -8933,9 +9957,9 @@ public final class HBaseProtos { } break; } - case 8: { + case 10: { bitField0_ |= 0x00000001; - longMsg_ = input.readInt64(); + bigdecimalMsg_ = input.readBytes(); break; } } @@ -8944,93 +9968,115 @@ public final class HBaseProtos { private int bitField0_; - // required int64 long_msg = 1; - private long longMsg_ ; - public boolean hasLongMsg() { + // required bytes bigdecimal_msg = 1; + private com.google.protobuf.ByteString bigdecimalMsg_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasBigdecimalMsg() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public long getLongMsg() { 
- return longMsg_; + public com.google.protobuf.ByteString getBigdecimalMsg() { + return bigdecimalMsg_; } - public Builder setLongMsg(long value) { - bitField0_ |= 0x00000001; - longMsg_ = value; + public Builder setBigdecimalMsg(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + bigdecimalMsg_ = value; onChanged(); return this; } - public Builder clearLongMsg() { + public Builder clearBigdecimalMsg() { bitField0_ = (bitField0_ & ~0x00000001); - longMsg_ = 0L; + bigdecimalMsg_ = getDefaultInstance().getBigdecimalMsg(); onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:LongMsg) + // @@protoc_insertion_point(builder_scope:BigDecimalMsg) } static { - defaultInstance = new LongMsg(true); + defaultInstance = new BigDecimalMsg(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:LongMsg) + // @@protoc_insertion_point(class_scope:BigDecimalMsg) } - public interface BigDecimalMsgOrBuilder + public interface UUIDOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bytes bigdecimal_msg = 1; - boolean hasBigdecimalMsg(); - com.google.protobuf.ByteString getBigdecimalMsg(); + // required uint64 least_sig_bits = 1; + boolean hasLeastSigBits(); + long getLeastSigBits(); + + // required uint64 most_sig_bits = 2; + boolean hasMostSigBits(); + long getMostSigBits(); } - public static final class BigDecimalMsg extends + public static final class UUID extends com.google.protobuf.GeneratedMessage - implements BigDecimalMsgOrBuilder { - // Use BigDecimalMsg.newBuilder() to construct. - private BigDecimalMsg(Builder builder) { + implements UUIDOrBuilder { + // Use UUID.newBuilder() to construct. + private UUID(Builder builder) { super(builder); } - private BigDecimalMsg(boolean noInit) {} + private UUID(boolean noInit) {} - private static final BigDecimalMsg defaultInstance; - public static BigDecimalMsg getDefaultInstance() { + private static final UUID defaultInstance; + public static UUID getDefaultInstance() { return defaultInstance; } - public BigDecimalMsg getDefaultInstanceForType() { + public UUID getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BigDecimalMsg_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_UUID_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BigDecimalMsg_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_UUID_fieldAccessorTable; } private int bitField0_; - // required bytes bigdecimal_msg = 1; - public static final int BIGDECIMAL_MSG_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString bigdecimalMsg_; - public boolean hasBigdecimalMsg() { + // required uint64 least_sig_bits = 1; + public static final int LEAST_SIG_BITS_FIELD_NUMBER = 1; + private long leastSigBits_; + public boolean hasLeastSigBits() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getBigdecimalMsg() { - return bigdecimalMsg_; + public long getLeastSigBits() { + return leastSigBits_; + } + + // required uint64 most_sig_bits = 2; + public static final int MOST_SIG_BITS_FIELD_NUMBER = 2; + private long mostSigBits_; + 
public boolean hasMostSigBits() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getMostSigBits() { + return mostSigBits_; } private void initFields() { - bigdecimalMsg_ = com.google.protobuf.ByteString.EMPTY; + leastSigBits_ = 0L; + mostSigBits_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasBigdecimalMsg()) { + if (!hasLeastSigBits()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasMostSigBits()) { memoizedIsInitialized = 0; return false; } @@ -9042,7 +10088,10 @@ public final class HBaseProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, bigdecimalMsg_); + output.writeUInt64(1, leastSigBits_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, mostSigBits_); } getUnknownFields().writeTo(output); } @@ -9055,7 +10104,11 @@ public final class HBaseProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, bigdecimalMsg_); + .computeUInt64Size(1, leastSigBits_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, mostSigBits_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -9074,16 +10127,21 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID) obj; boolean result = true; - result = result && (hasBigdecimalMsg() == other.hasBigdecimalMsg()); - if (hasBigdecimalMsg()) { - result = result && getBigdecimalMsg() - .equals(other.getBigdecimalMsg()); + result = result && (hasLeastSigBits() == other.hasLeastSigBits()); + if (hasLeastSigBits()) { + result = result && (getLeastSigBits() + == other.getLeastSigBits()); + } + result = result && (hasMostSigBits() == other.hasMostSigBits()); + if (hasMostSigBits()) { + result = result && (getMostSigBits() + == other.getMostSigBits()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -9094,49 +10152,53 @@ public final class HBaseProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBigdecimalMsg()) { - hash = (37 * hash) + BIGDECIMAL_MSG_FIELD_NUMBER; - hash = (53 * hash) + getBigdecimalMsg().hashCode(); + if (hasLeastSigBits()) { + hash = (37 * hash) + LEAST_SIG_BITS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLeastSigBits()); + } + if (hasMostSigBits()) { + hash = (37 * hash) + MOST_SIG_BITS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getMostSigBits()); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom( com.google.protobuf.ByteString data) throws 
com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -9145,7 +10207,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -9156,12 +10218,12 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -9171,7 +10233,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - 
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -9184,18 +10246,18 @@ public final class HBaseProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsgOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BigDecimalMsg_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_UUID_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_BigDecimalMsg_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_UUID_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -9214,8 +10276,10 @@ public final class HBaseProtos { public Builder clear() { super.clear(); - bigdecimalMsg_ = com.google.protobuf.ByteString.EMPTY; + leastSigBits_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); + mostSigBits_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -9225,24 +10289,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -9250,39 +10314,50 @@ public final class 
HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.bigdecimalMsg_ = bigdecimalMsg_; + result.leastSigBits_ = leastSigBits_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.mostSigBits_ = mostSigBits_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg.getDefaultInstance()) return this; - if (other.hasBigdecimalMsg()) { - setBigdecimalMsg(other.getBigdecimalMsg()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance()) return this; + if (other.hasLeastSigBits()) { + setLeastSigBits(other.getLeastSigBits()); + } + if (other.hasMostSigBits()) { + setMostSigBits(other.getMostSigBits()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasBigdecimalMsg()) { + if (!hasLeastSigBits()) { + + return false; + } + if (!hasMostSigBits()) { return false; } @@ -9312,9 +10387,14 @@ public final class HBaseProtos { } break; } - case 10: { + case 8: { bitField0_ |= 0x00000001; - bigdecimalMsg_ = input.readBytes(); + leastSigBits_ = input.readUInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + mostSigBits_ = input.readUInt64(); break; } } @@ -9323,117 +10403,154 @@ public final class HBaseProtos { private int bitField0_; - // required bytes bigdecimal_msg = 1; - private com.google.protobuf.ByteString bigdecimalMsg_ = com.google.protobuf.ByteString.EMPTY; - public boolean hasBigdecimalMsg() { + // required uint64 least_sig_bits = 1; + private long leastSigBits_ ; + public boolean hasLeastSigBits() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getBigdecimalMsg() { - return bigdecimalMsg_; + public long getLeastSigBits() { + return leastSigBits_; } - public Builder setBigdecimalMsg(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - bigdecimalMsg_ = value; + public Builder setLeastSigBits(long value) { + bitField0_ |= 0x00000001; + leastSigBits_ = value; onChanged(); return this; } - public Builder clearBigdecimalMsg() { + public Builder 
clearLeastSigBits() { bitField0_ = (bitField0_ & ~0x00000001); - bigdecimalMsg_ = getDefaultInstance().getBigdecimalMsg(); + leastSigBits_ = 0L; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:BigDecimalMsg) + // required uint64 most_sig_bits = 2; + private long mostSigBits_ ; + public boolean hasMostSigBits() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + public long getMostSigBits() { + return mostSigBits_; + } + public Builder setMostSigBits(long value) { + bitField0_ |= 0x00000002; + mostSigBits_ = value; + onChanged(); + return this; + } + public Builder clearMostSigBits() { + bitField0_ = (bitField0_ & ~0x00000002); + mostSigBits_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:UUID) } static { - defaultInstance = new BigDecimalMsg(true); + defaultInstance = new UUID(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:BigDecimalMsg) + // @@protoc_insertion_point(class_scope:UUID) } - public interface UUIDOrBuilder + public interface NamespaceDescriptorOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required uint64 least_sig_bits = 1; - boolean hasLeastSigBits(); - long getLeastSigBits(); + // required bytes name = 1; + boolean hasName(); + com.google.protobuf.ByteString getName(); - // required uint64 most_sig_bits = 2; - boolean hasMostSigBits(); - long getMostSigBits(); + // repeated .NameStringPair configuration = 2; + java.util.List + getConfigurationList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index); + int getConfigurationCount(); + java.util.List + getConfigurationOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( + int index); } - public static final class UUID extends + public static final class NamespaceDescriptor extends com.google.protobuf.GeneratedMessage - implements UUIDOrBuilder { - // Use UUID.newBuilder() to construct. - private UUID(Builder builder) { + implements NamespaceDescriptorOrBuilder { + // Use NamespaceDescriptor.newBuilder() to construct. 
+ private NamespaceDescriptor(Builder builder) { super(builder); } - private UUID(boolean noInit) {} + private NamespaceDescriptor(boolean noInit) {} - private static final UUID defaultInstance; - public static UUID getDefaultInstance() { + private static final NamespaceDescriptor defaultInstance; + public static NamespaceDescriptor getDefaultInstance() { return defaultInstance; } - public UUID getDefaultInstanceForType() { + public NamespaceDescriptor getDefaultInstanceForType() { return defaultInstance; } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_UUID_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NamespaceDescriptor_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_UUID_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NamespaceDescriptor_fieldAccessorTable; } private int bitField0_; - // required uint64 least_sig_bits = 1; - public static final int LEAST_SIG_BITS_FIELD_NUMBER = 1; - private long leastSigBits_; - public boolean hasLeastSigBits() { + // required bytes name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString name_; + public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public long getLeastSigBits() { - return leastSigBits_; + public com.google.protobuf.ByteString getName() { + return name_; } - // required uint64 most_sig_bits = 2; - public static final int MOST_SIG_BITS_FIELD_NUMBER = 2; - private long mostSigBits_; - public boolean hasMostSigBits() { - return ((bitField0_ & 0x00000002) == 0x00000002); + // repeated .NameStringPair configuration = 2; + public static final int CONFIGURATION_FIELD_NUMBER = 2; + private java.util.List configuration_; + public java.util.List getConfigurationList() { + return configuration_; } - public long getMostSigBits() { - return mostSigBits_; + public java.util.List + getConfigurationOrBuilderList() { + return configuration_; + } + public int getConfigurationCount() { + return configuration_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { + return configuration_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( + int index) { + return configuration_.get(index); } private void initFields() { - leastSigBits_ = 0L; - mostSigBits_ = 0L; + name_ = com.google.protobuf.ByteString.EMPTY; + configuration_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasLeastSigBits()) { + if (!hasName()) { memoizedIsInitialized = 0; return false; } - if (!hasMostSigBits()) { - memoizedIsInitialized = 0; - return false; + for (int i = 0; i < getConfigurationCount(); i++) { + if (!getConfiguration(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } } memoizedIsInitialized = 1; return true; @@ -9443,10 +10560,10 @@ public final class HBaseProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, leastSigBits_); - } - if 
(((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, mostSigBits_); + output.writeBytes(1, name_); + } + for (int i = 0; i < configuration_.size(); i++) { + output.writeMessage(2, configuration_.get(i)); } getUnknownFields().writeTo(output); } @@ -9459,11 +10576,11 @@ public final class HBaseProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, leastSigBits_); + .computeBytesSize(1, name_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { + for (int i = 0; i < configuration_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, mostSigBits_); + .computeMessageSize(2, configuration_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -9482,22 +10599,19 @@ public final class HBaseProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID) obj; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor) obj; boolean result = true; - result = result && (hasLeastSigBits() == other.hasLeastSigBits()); - if (hasLeastSigBits()) { - result = result && (getLeastSigBits() - == other.getLeastSigBits()); - } - result = result && (hasMostSigBits() == other.hasMostSigBits()); - if (hasMostSigBits()) { - result = result && (getMostSigBits() - == other.getMostSigBits()); + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); } + result = result && getConfigurationList() + .equals(other.getConfigurationList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -9507,53 +10621,53 @@ public final class HBaseProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasLeastSigBits()) { - hash = (37 * hash) + LEAST_SIG_BITS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getLeastSigBits()); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); } - if (hasMostSigBits()) { - hash = (37 * hash) + MOST_SIG_BITS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getMostSigBits()); + if (getConfigurationCount() > 0) { + hash = (37 * hash) + CONFIGURATION_FIELD_NUMBER; + hash = (53 * hash) + getConfigurationList().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, 
extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -9562,7 +10676,7 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -9573,12 +10687,12 @@ public final class HBaseProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -9588,7 +10702,7 @@ public final class HBaseProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -9601,18 +10715,18 @@ public final class HBaseProtos { } public static final class Builder extends 
com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_UUID_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NamespaceDescriptor_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_UUID_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_NamespaceDescriptor_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -9623,6 +10737,7 @@ public final class HBaseProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getConfigurationFieldBuilder(); } } private static Builder create() { @@ -9631,10 +10746,14 @@ public final class HBaseProtos { public Builder clear() { super.clear(); - leastSigBits_ = 0L; + name_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000001); - mostSigBits_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); + if (configurationBuilder_ == null) { + configuration_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + configurationBuilder_.clear(); + } return this; } @@ -9644,24 +10763,24 @@ public final class HBaseProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID build() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( 
result).asInvalidProtocolBufferException(); @@ -9669,149 +10788,353 @@ public final class HBaseProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID(this); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.leastSigBits_ = leastSigBits_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; + result.name_ = name_; + if (configurationBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + configuration_ = java.util.Collections.unmodifiableList(configuration_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.configuration_ = configuration_; + } else { + result.configuration_ = configurationBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance()) return this; + if (other.hasName()) { + setName(other.getName()); + } + if (configurationBuilder_ == null) { + if (!other.configuration_.isEmpty()) { + if (configuration_.isEmpty()) { + configuration_ = other.configuration_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureConfigurationIsMutable(); + configuration_.addAll(other.configuration_); + } + onChanged(); + } + } else { + if (!other.configuration_.isEmpty()) { + if (configurationBuilder_.isEmpty()) { + configurationBuilder_.dispose(); + configurationBuilder_ = null; + configuration_ = other.configuration_; + bitField0_ = (bitField0_ & ~0x00000002); + configurationBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getConfigurationFieldBuilder() : null; + } else { + configurationBuilder_.addAllMessages(other.configuration_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + for (int i = 0; i < getConfigurationCount(); i++) { + if (!getConfiguration(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addConfiguration(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required bytes name = 1; + private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY; + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public com.google.protobuf.ByteString getName() { + return name_; + } + public Builder setName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + + // repeated .NameStringPair configuration = 2; + private java.util.List configuration_ = + java.util.Collections.emptyList(); + private void ensureConfigurationIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + configuration_ = new java.util.ArrayList(configuration_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> configurationBuilder_; + + public java.util.List getConfigurationList() { + if (configurationBuilder_ == null) { + return java.util.Collections.unmodifiableList(configuration_); + } else { + return configurationBuilder_.getMessageList(); + } + } + public int getConfigurationCount() { + if (configurationBuilder_ == null) { + return configuration_.size(); + } else { + return configurationBuilder_.getCount(); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getConfiguration(int index) { + if (configurationBuilder_ == null) { + return configuration_.get(index); + } else { + return configurationBuilder_.getMessage(index); + } + } + public Builder setConfiguration( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { + if 
(configurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConfigurationIsMutable(); + configuration_.set(index, value); + onChanged(); + } else { + configurationBuilder_.setMessage(index, value); + } + return this; + } + public Builder setConfiguration( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + configuration_.set(index, builderForValue.build()); + onChanged(); + } else { + configurationBuilder_.setMessage(index, builderForValue.build()); } - result.mostSigBits_ = mostSigBits_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; + return this; } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID)other); + public Builder addConfiguration(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { + if (configurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConfigurationIsMutable(); + configuration_.add(value); + onChanged(); } else { - super.mergeFrom(other); - return this; + configurationBuilder_.addMessage(value); } + return this; } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance()) return this; - if (other.hasLeastSigBits()) { - setLeastSigBits(other.getLeastSigBits()); + public Builder addConfiguration( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) { + if (configurationBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureConfigurationIsMutable(); + configuration_.add(index, value); + onChanged(); + } else { + configurationBuilder_.addMessage(index, value); } - if (other.hasMostSigBits()) { - setMostSigBits(other.getMostSigBits()); + return this; + } + public Builder addConfiguration( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + configuration_.add(builderForValue.build()); + onChanged(); + } else { + configurationBuilder_.addMessage(builderForValue.build()); } - this.mergeUnknownFields(other.getUnknownFields()); return this; } - - public final boolean isInitialized() { - if (!hasLeastSigBits()) { - - return false; + public Builder addConfiguration( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + configuration_.add(index, builderForValue.build()); + onChanged(); + } else { + configurationBuilder_.addMessage(index, builderForValue.build()); } - if (!hasMostSigBits()) { - - return false; + return this; + } + public Builder addAllConfiguration( + java.lang.Iterable values) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + super.addAll(values, configuration_); + onChanged(); + } else { + configurationBuilder_.addAllMessages(values); } - return true; + return this; } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - 
com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - leastSigBits_ = input.readUInt64(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - mostSigBits_ = input.readUInt64(); - break; - } - } + public Builder clearConfiguration() { + if (configurationBuilder_ == null) { + configuration_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + configurationBuilder_.clear(); } + return this; } - - private int bitField0_; - - // required uint64 least_sig_bits = 1; - private long leastSigBits_ ; - public boolean hasLeastSigBits() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public Builder removeConfiguration(int index) { + if (configurationBuilder_ == null) { + ensureConfigurationIsMutable(); + configuration_.remove(index); + onChanged(); + } else { + configurationBuilder_.remove(index); + } + return this; } - public long getLeastSigBits() { - return leastSigBits_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder getConfigurationBuilder( + int index) { + return getConfigurationFieldBuilder().getBuilder(index); } - public Builder setLeastSigBits(long value) { - bitField0_ |= 0x00000001; - leastSigBits_ = value; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( + int index) { + if (configurationBuilder_ == null) { + return configuration_.get(index); } else { + return configurationBuilder_.getMessageOrBuilder(index); + } } - public Builder clearLeastSigBits() { - bitField0_ = (bitField0_ & ~0x00000001); - leastSigBits_ = 0L; - onChanged(); - return this; + public java.util.List + getConfigurationOrBuilderList() { + if (configurationBuilder_ != null) { + return configurationBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(configuration_); + } } - - // required uint64 most_sig_bits = 2; - private long mostSigBits_ ; - public boolean hasMostSigBits() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addConfigurationBuilder() { + return getConfigurationFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()); } - public long getMostSigBits() { - return mostSigBits_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addConfigurationBuilder( + int index) { + return getConfigurationFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance()); } - public Builder setMostSigBits(long value) { - bitField0_ |= 0x00000002; - mostSigBits_ = value; - onChanged(); - return this; + public java.util.List + getConfigurationBuilderList() { + return getConfigurationFieldBuilder().getBuilderList(); } - public Builder clearMostSigBits() { - bitField0_ = (bitField0_ & ~0x00000002); - mostSigBits_ = 0L; - onChanged(); - return this; + private 
com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> + getConfigurationFieldBuilder() { + if (configurationBuilder_ == null) { + configurationBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>( + configuration_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + configuration_ = null; + } + return configurationBuilder_; } - // @@protoc_insertion_point(builder_scope:UUID) + // @@protoc_insertion_point(builder_scope:NamespaceDescriptor) } static { - defaultInstance = new UUID(true); + defaultInstance = new NamespaceDescriptor(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:UUID) + // @@protoc_insertion_point(class_scope:NamespaceDescriptor) } private static com.google.protobuf.Descriptors.Descriptor + internal_static_TableName_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_TableName_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_TableSchema_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -9896,6 +11219,11 @@ public final class HBaseProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_UUID_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_NamespaceDescriptor_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_NamespaceDescriptor_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -9905,58 +11233,70 @@ public final class HBaseProtos { descriptor; static { java.lang.String[] descriptorData = { - "\n\013hbase.proto\032\nCell.proto\"\226\001\n\013TableSchem" + - "a\022\014\n\004name\030\001 \001(\014\022#\n\nattributes\030\002 \003(\0132\017.By" + - "tesBytesPair\022,\n\017column_families\030\003 \003(\0132\023." 
+ - "ColumnFamilySchema\022&\n\rconfiguration\030\004 \003(" + - "\0132\017.NameStringPair\"o\n\022ColumnFamilySchema" + - "\022\014\n\004name\030\001 \002(\014\022#\n\nattributes\030\002 \003(\0132\017.Byt" + - "esBytesPair\022&\n\rconfiguration\030\003 \003(\0132\017.Nam" + - "eStringPair\"w\n\nRegionInfo\022\021\n\tregion_id\030\001" + - " \002(\004\022\022\n\ntable_name\030\002 \002(\014\022\021\n\tstart_key\030\003 " + - "\001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022\r\n", - "\005split\030\006 \001(\010\"1\n\014FavoredNodes\022!\n\014favored_" + - "node\030\001 \003(\0132\013.ServerName\"\225\001\n\017RegionSpecif" + - "ier\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.Regi" + - "onSpecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023Region" + - "SpecifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODE" + - "D_REGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004from\030\001 \001" + - "(\004\022\n\n\002to\030\002 \001(\004\"A\n\nServerName\022\021\n\thost_nam" + - "e\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(" + - "\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016NameSt" + - "ringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n", - "\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 " + - "\001(\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006" + - "second\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 " + - "\001(\t\022\r\n\005value\030\002 \001(\003\"\256\001\n\023SnapshotDescripti" + - "on\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreat" + - "ion_time\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031.Snapsh" + - "otDescription.Type:\005FLUSH\022\017\n\007version\030\005 \001" + - "(\005\"\037\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\"\n\n\010E" + - "mptyMsg\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\'\n\r" + - "BigDecimalMsg\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n", - "\004UUID\022\026\n\016least_sig_bits\030\001 \002(\004\022\025\n\rmost_si" + - "g_bits\030\002 \002(\004*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n" + - "\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL" + - "\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n" + - "\005NO_OP\020\006B>\n*org.apache.hadoop.hbase.prot" + - "obuf.generatedB\013HBaseProtosH\001\240\001\001" + "\n\013hbase.proto\032\nCell.proto\"1\n\tTableName\022\021" + + "\n\tnamespace\030\001 \002(\014\022\021\n\tqualifier\030\002 \002(\014\"\250\001\n" + + "\013TableSchema\022\036\n\ntable_name\030\001 \001(\0132\n.Table" + + "Name\022#\n\nattributes\030\002 \003(\0132\017.BytesBytesPai" + + "r\022,\n\017column_families\030\003 \003(\0132\023.ColumnFamil" + + "ySchema\022&\n\rconfiguration\030\004 \003(\0132\017.NameStr" + + "ingPair\"o\n\022ColumnFamilySchema\022\014\n\004name\030\001 " + + "\002(\014\022#\n\nattributes\030\002 \003(\0132\017.BytesBytesPair" + + "\022&\n\rconfiguration\030\003 \003(\0132\017.NameStringPair" + + "\"\203\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002(\004\022\036\n\nta", + "ble_name\030\002 \002(\0132\n.TableName\022\021\n\tstart_key\030" + + "\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022" + + "\r\n\005split\030\006 \001(\010\"1\n\014FavoredNodes\022!\n\014favore" + + "d_node\030\001 
\003(\0132\013.ServerName\"\225\001\n\017RegionSpec" + + "ifier\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.Re" + + "gionSpecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023Regi" + + "onSpecifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCO" + + "DED_REGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004from\030\001" + + " \001(\004\022\n\n\002to\030\002 \001(\004\"A\n\nServerName\022\021\n\thost_n" + + "ame\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 ", + "\001(\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016Name" + + "StringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\"" + + ",\n\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030" + + "\002 \001(\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016" + + "\n\006second\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030" + + "\001 \001(\t\022\r\n\005value\030\002 \001(\003\"\256\001\n\023SnapshotDescrip" + + "tion\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcre" + + "ation_time\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031.Snap" + + "shotDescription.Type:\005FLUSH\022\017\n\007version\030\005" + + " \001(\005\"\037\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\"\n\n", + "\010EmptyMsg\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\'" + + "\n\rBigDecimalMsg\022\026\n\016bigdecimal_msg\030\001 \002(\014\"" + + "5\n\004UUID\022\026\n\016least_sig_bits\030\001 \002(\004\022\025\n\rmost_" + + "sig_bits\030\002 \002(\004\"K\n\023NamespaceDescriptor\022\014\n" + + "\004name\030\001 \002(\014\022&\n\rconfiguration\030\002 \003(\0132\017.Nam" + + "eStringPair*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\r" + + "LESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020" + + "\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005" + + "NO_OP\020\006B>\n*org.apache.hadoop.hbase.proto" + + "buf.generatedB\013HBaseProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor root) { descriptor = root; - internal_static_TableSchema_descriptor = + internal_static_TableName_descriptor = getDescriptor().getMessageTypes().get(0); + internal_static_TableName_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_TableName_descriptor, + new java.lang.String[] { "Namespace", "Qualifier", }, + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.class, + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder.class); + internal_static_TableSchema_descriptor = + getDescriptor().getMessageTypes().get(1); internal_static_TableSchema_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TableSchema_descriptor, - new java.lang.String[] { "Name", "Attributes", "ColumnFamilies", "Configuration", }, + new java.lang.String[] { "TableName", "Attributes", "ColumnFamilies", "Configuration", }, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder.class); internal_static_ColumnFamilySchema_descriptor = - getDescriptor().getMessageTypes().get(1); + 
getDescriptor().getMessageTypes().get(2); internal_static_ColumnFamilySchema_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ColumnFamilySchema_descriptor, @@ -9964,7 +11304,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder.class); internal_static_RegionInfo_descriptor = - getDescriptor().getMessageTypes().get(2); + getDescriptor().getMessageTypes().get(3); internal_static_RegionInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionInfo_descriptor, @@ -9972,7 +11312,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder.class); internal_static_FavoredNodes_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(4); internal_static_FavoredNodes_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_FavoredNodes_descriptor, @@ -9980,7 +11320,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.FavoredNodes.Builder.class); internal_static_RegionSpecifier_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(5); internal_static_RegionSpecifier_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionSpecifier_descriptor, @@ -9988,7 +11328,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder.class); internal_static_TimeRange_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(6); internal_static_TimeRange_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TimeRange_descriptor, @@ -9996,7 +11336,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeRange.Builder.class); internal_static_ServerName_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(7); internal_static_ServerName_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ServerName_descriptor, @@ -10004,7 +11344,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder.class); internal_static_Coprocessor_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(8); internal_static_Coprocessor_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Coprocessor_descriptor, @@ -10012,7 +11352,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder.class); internal_static_NameStringPair_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(9); 
internal_static_NameStringPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameStringPair_descriptor, @@ -10020,7 +11360,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder.class); internal_static_NameBytesPair_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(10); internal_static_NameBytesPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameBytesPair_descriptor, @@ -10028,7 +11368,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair.Builder.class); internal_static_BytesBytesPair_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(11); internal_static_BytesBytesPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BytesBytesPair_descriptor, @@ -10036,7 +11376,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair.Builder.class); internal_static_NameInt64Pair_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(12); internal_static_NameInt64Pair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameInt64Pair_descriptor, @@ -10044,7 +11384,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair.Builder.class); internal_static_SnapshotDescription_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(13); internal_static_SnapshotDescription_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SnapshotDescription_descriptor, @@ -10052,7 +11392,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder.class); internal_static_EmptyMsg_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(14); internal_static_EmptyMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EmptyMsg_descriptor, @@ -10060,7 +11400,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg.Builder.class); internal_static_LongMsg_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(15); internal_static_LongMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_LongMsg_descriptor, @@ -10068,7 +11408,7 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.LongMsg.Builder.class); internal_static_BigDecimalMsg_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(16); 
internal_static_BigDecimalMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BigDecimalMsg_descriptor, @@ -10076,13 +11416,21 @@ public final class HBaseProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BigDecimalMsg.Builder.class); internal_static_UUID_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(17); internal_static_UUID_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UUID_descriptor, new java.lang.String[] { "LeastSigBits", "MostSigBits", }, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder.class); + internal_static_NamespaceDescriptor_descriptor = + getDescriptor().getMessageTypes().get(18); + internal_static_NamespaceDescriptor_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_NamespaceDescriptor_descriptor, + new java.lang.String[] { "Name", "Configuration", }, + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.class, + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder.class); return null; } }; diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java index 7f7cc70..bc3deb2 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java @@ -11,9 +11,10 @@ public final class MasterAdminProtos { public interface AddColumnRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bytes table_name = 1; + // required .TableName table_name = 1; boolean hasTableName(); - com.google.protobuf.ByteString getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); // required .ColumnFamilySchema column_families = 2; boolean hasColumnFamilies(); @@ -49,13 +50,16 @@ public final class MasterAdminProtos { } private int bitField0_; - // required bytes table_name = 1; + // required .TableName table_name = 1; public static final int TABLE_NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString tableName_; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { return tableName_; } @@ -73,7 +77,7 @@ public final class MasterAdminProtos { } private void initFields() { - tableName_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); columnFamilies_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); } private byte memoizedIsInitialized = -1; @@ -89,6 +93,10 @@ public final class MasterAdminProtos { 
memoizedIsInitialized = 0; return false; } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } if (!getColumnFamilies().isInitialized()) { memoizedIsInitialized = 0; return false; @@ -101,7 +109,7 @@ public final class MasterAdminProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, tableName_); + output.writeMessage(1, tableName_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, columnFamilies_); @@ -117,7 +125,7 @@ public final class MasterAdminProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, tableName_); + .computeMessageSize(1, tableName_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream @@ -281,6 +289,7 @@ public final class MasterAdminProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); getColumnFamiliesFieldBuilder(); } } @@ -290,7 +299,11 @@ public final class MasterAdminProtos { public Builder clear() { super.clear(); - tableName_ = com.google.protobuf.ByteString.EMPTY; + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); if (columnFamiliesBuilder_ == null) { columnFamilies_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); @@ -339,7 +352,11 @@ public final class MasterAdminProtos { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.tableName_ = tableName_; + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } @@ -365,7 +382,7 @@ public final class MasterAdminProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest.getDefaultInstance()) return this; if (other.hasTableName()) { - setTableName(other.getTableName()); + mergeTableName(other.getTableName()); } if (other.hasColumnFamilies()) { mergeColumnFamilies(other.getColumnFamilies()); @@ -383,6 +400,10 @@ public final class MasterAdminProtos { return false; } + if (!getTableName().isInitialized()) { + + return false; + } if (!getColumnFamilies().isInitialized()) { return false; @@ -414,8 +435,12 @@ public final class MasterAdminProtos { break; } case 10: { - bitField0_ |= 0x00000001; - tableName_ = input.readBytes(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + if (hasTableName()) { + subBuilder.mergeFrom(getTableName()); + } + input.readMessage(subBuilder, extensionRegistry); + setTableName(subBuilder.buildPartial()); break; } case 18: { @@ -433,29 +458,95 @@ public final class MasterAdminProtos { private int bitField0_; - // required bytes table_name = 1; - private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + // required .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { - return tableName_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } } - public Builder setTableName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - tableName_ = value; - onChanged(); + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; return this; } public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - tableName_ = getDefaultInstance().getTableName(); - onChanged(); return this; } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } // required .ColumnFamilySchema column_families = 2; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema columnFamilies_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); @@ -861,9 +952,10 @@ public final class MasterAdminProtos { public interface DeleteColumnRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bytes table_name = 1; + // required .TableName table_name = 1; boolean hasTableName(); - com.google.protobuf.ByteString getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); // required bytes column_name = 2; boolean hasColumnName(); @@ -898,13 +990,16 @@ public final class MasterAdminProtos { } private int bitField0_; - // required bytes table_name = 1; + // required .TableName table_name = 1; public static final int TABLE_NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString tableName_; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { return tableName_; } @@ -919,7 +1014,7 @@ public final class MasterAdminProtos { } private void initFields() { - tableName_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); columnName_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; @@ -935,6 +1030,10 @@ public final class MasterAdminProtos { memoizedIsInitialized = 0; return false; } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -943,7 +1042,7 @@ public final class MasterAdminProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, tableName_); + output.writeMessage(1, tableName_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, columnName_); @@ -959,7 +1058,7 @@ public final class MasterAdminProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, tableName_); + .computeMessageSize(1, tableName_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream @@ -1123,6 +1222,7 @@ public final class MasterAdminProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); } } private static Builder create() { @@ -1131,7 +1231,11 @@ public final class MasterAdminProtos { public Builder clear() { super.clear(); - tableName_ = com.google.protobuf.ByteString.EMPTY; + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = 
(bitField0_ & ~0x00000001); columnName_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); @@ -1176,7 +1280,11 @@ public final class MasterAdminProtos { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.tableName_ = tableName_; + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } @@ -1198,7 +1306,7 @@ public final class MasterAdminProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest.getDefaultInstance()) return this; if (other.hasTableName()) { - setTableName(other.getTableName()); + mergeTableName(other.getTableName()); } if (other.hasColumnName()) { setColumnName(other.getColumnName()); @@ -1216,6 +1324,10 @@ public final class MasterAdminProtos { return false; } + if (!getTableName().isInitialized()) { + + return false; + } return true; } @@ -1243,8 +1355,12 @@ public final class MasterAdminProtos { break; } case 10: { - bitField0_ |= 0x00000001; - tableName_ = input.readBytes(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + if (hasTableName()) { + subBuilder.mergeFrom(getTableName()); + } + input.readMessage(subBuilder, extensionRegistry); + setTableName(subBuilder.buildPartial()); break; } case 18: { @@ -1258,29 +1374,95 @@ public final class MasterAdminProtos { private int bitField0_; - // required bytes table_name = 1; - private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + // required .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { - return tableName_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } } - public Builder setTableName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - tableName_ = value; - onChanged(); + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); 
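The bytes-to-TableName switch shown above repeats across the admin requests in this diff (AddColumnRequest, DeleteColumnRequest, ModifyColumnRequest, and the table-level requests further down). A minimal sketch of what a caller now builds, assuming the HBaseProtos.TableName message exposes separate namespace and qualifier bytes fields:

    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName pbTableName =
        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder()
            .setNamespace(com.google.protobuf.ByteString.copyFromUtf8("default"))    // assumed field name
            .setQualifier(com.google.protobuf.ByteString.copyFromUtf8("usertable"))  // assumed field name
            .build();
    org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest request =
        org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest.newBuilder()
            .setTableName(pbTableName)   // previously took a raw ByteString table name
            .setColumnName(com.google.protobuf.ByteString.copyFromUtf8("cf"))
            .build();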
+ } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; return this; } public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - tableName_ = getDefaultInstance().getTableName(); - onChanged(); return this; } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } // required bytes column_name = 2; private com.google.protobuf.ByteString columnName_ = com.google.protobuf.ByteString.EMPTY; @@ -1620,9 +1802,10 @@ public final class MasterAdminProtos { public interface ModifyColumnRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bytes table_name = 1; + // required .TableName table_name = 1; boolean hasTableName(); - com.google.protobuf.ByteString getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); // required .ColumnFamilySchema column_families = 2; boolean hasColumnFamilies(); @@ -1658,13 +1841,16 @@ public final class MasterAdminProtos { } private int bitField0_; - // required bytes table_name = 1; + // required .TableName table_name = 1; public static final int TABLE_NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString tableName_; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder 
getTableNameOrBuilder() { return tableName_; } @@ -1682,7 +1868,7 @@ public final class MasterAdminProtos { } private void initFields() { - tableName_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); columnFamilies_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); } private byte memoizedIsInitialized = -1; @@ -1698,6 +1884,10 @@ public final class MasterAdminProtos { memoizedIsInitialized = 0; return false; } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } if (!getColumnFamilies().isInitialized()) { memoizedIsInitialized = 0; return false; @@ -1710,7 +1900,7 @@ public final class MasterAdminProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, tableName_); + output.writeMessage(1, tableName_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, columnFamilies_); @@ -1726,7 +1916,7 @@ public final class MasterAdminProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, tableName_); + .computeMessageSize(1, tableName_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream @@ -1890,6 +2080,7 @@ public final class MasterAdminProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); getColumnFamiliesFieldBuilder(); } } @@ -1899,7 +2090,11 @@ public final class MasterAdminProtos { public Builder clear() { super.clear(); - tableName_ = com.google.protobuf.ByteString.EMPTY; + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); if (columnFamiliesBuilder_ == null) { columnFamilies_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); @@ -1948,7 +2143,11 @@ public final class MasterAdminProtos { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.tableName_ = tableName_; + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } @@ -1974,7 +2173,7 @@ public final class MasterAdminProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest.getDefaultInstance()) return this; if (other.hasTableName()) { - setTableName(other.getTableName()); + mergeTableName(other.getTableName()); } if (other.hasColumnFamilies()) { mergeColumnFamilies(other.getColumnFamilies()); @@ -1992,6 +2191,10 @@ public final class MasterAdminProtos { return false; } + if (!getTableName().isInitialized()) { + + return false; + } if (!getColumnFamilies().isInitialized()) { return false; @@ -2023,8 +2226,12 @@ public final class MasterAdminProtos { break; } case 10: { - bitField0_ |= 0x00000001; - tableName_ = input.readBytes(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + if (hasTableName()) { + subBuilder.mergeFrom(getTableName()); + } + input.readMessage(subBuilder, extensionRegistry); + setTableName(subBuilder.buildPartial()); break; } case 18: { @@ -2042,29 +2249,95 @@ public final class MasterAdminProtos { private int bitField0_; - // required bytes table_name = 1; - private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + // required .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { - return tableName_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } } - public Builder setTableName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - tableName_ = value; - onChanged(); + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; return this; } public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - tableName_ = getDefaultInstance().getTableName(); - onChanged(); return this; } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } 
else { + return tableName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } // required .ColumnFamilySchema column_families = 2; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema columnFamilies_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); @@ -7683,9 +7956,10 @@ public final class MasterAdminProtos { public interface DeleteTableRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bytes table_name = 1; + // required .TableName table_name = 1; boolean hasTableName(); - com.google.protobuf.ByteString getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); } public static final class DeleteTableRequest extends com.google.protobuf.GeneratedMessage @@ -7716,18 +7990,21 @@ public final class MasterAdminProtos { } private int bitField0_; - // required bytes table_name = 1; + // required .TableName table_name = 1; public static final int TABLE_NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString tableName_; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { return tableName_; } private void initFields() { - tableName_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -7738,6 +8015,10 @@ public final class MasterAdminProtos { memoizedIsInitialized = 0; return false; } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -7746,7 +8027,7 @@ public final class MasterAdminProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, tableName_); + output.writeMessage(1, tableName_); } getUnknownFields().writeTo(output); } @@ -7759,7 +8040,7 @@ public final class MasterAdminProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, tableName_); + .computeMessageSize(1, tableName_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -7910,6 +8191,7 @@ public final class MasterAdminProtos { } private void maybeForceBuilderInitialization() { if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); } } private static Builder create() { @@ -7918,7 +8200,11 @@ public final class MasterAdminProtos { public Builder clear() { super.clear(); - tableName_ = com.google.protobuf.ByteString.EMPTY; + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -7961,7 +8247,11 @@ public final class MasterAdminProtos { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.tableName_ = tableName_; + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -7979,7 +8269,7 @@ public final class MasterAdminProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest.getDefaultInstance()) return this; if (other.hasTableName()) { - setTableName(other.getTableName()); + mergeTableName(other.getTableName()); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -7990,6 +8280,10 @@ public final class MasterAdminProtos { return false; } + if (!getTableName().isInitialized()) { + + return false; + } return true; } @@ -8017,8 +8311,12 @@ public final class MasterAdminProtos { break; } case 10: { - bitField0_ |= 0x00000001; - tableName_ = input.readBytes(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + if (hasTableName()) { + subBuilder.mergeFrom(getTableName()); + } + input.readMessage(subBuilder, extensionRegistry); + setTableName(subBuilder.buildPartial()); break; } } @@ -8027,29 +8325,95 @@ public final class MasterAdminProtos { private int bitField0_; - // required bytes table_name = 1; - private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + // required .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { - return tableName_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } } - public Builder setTableName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - tableName_ = value; - onChanged(); + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + 
tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; return this; } public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - tableName_ = getDefaultInstance().getTableName(); - onChanged(); return this; } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } // @@protoc_insertion_point(builder_scope:DeleteTableRequest) } @@ -8365,9 +8729,10 @@ public final class MasterAdminProtos { public interface EnableTableRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bytes table_name = 1; + // required .TableName table_name = 1; boolean hasTableName(); - com.google.protobuf.ByteString getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); } public static final class EnableTableRequest extends com.google.protobuf.GeneratedMessage @@ -8398,18 +8763,21 @@ public final class MasterAdminProtos { } private int bitField0_; - // required bytes table_name = 1; + // required .TableName table_name = 1; public static final int TABLE_NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString tableName_; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; public boolean 
hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { return tableName_; } private void initFields() { - tableName_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -8420,6 +8788,10 @@ public final class MasterAdminProtos { memoizedIsInitialized = 0; return false; } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -8428,7 +8800,7 @@ public final class MasterAdminProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, tableName_); + output.writeMessage(1, tableName_); } getUnknownFields().writeTo(output); } @@ -8441,7 +8813,7 @@ public final class MasterAdminProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, tableName_); + .computeMessageSize(1, tableName_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -8592,6 +8964,7 @@ public final class MasterAdminProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); } } private static Builder create() { @@ -8600,7 +8973,11 @@ public final class MasterAdminProtos { public Builder clear() { super.clear(); - tableName_ = com.google.protobuf.ByteString.EMPTY; + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -8643,7 +9020,11 @@ public final class MasterAdminProtos { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.tableName_ = tableName_; + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -8661,7 +9042,7 @@ public final class MasterAdminProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest.getDefaultInstance()) return this; if (other.hasTableName()) { - setTableName(other.getTableName()); + mergeTableName(other.getTableName()); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -8672,6 +9053,10 @@ public final class MasterAdminProtos { return false; } + if (!getTableName().isInitialized()) { + + return false; + } return true; } @@ -8699,8 +9084,12 @@ public final class MasterAdminProtos { break; } case 10: { - bitField0_ |= 0x00000001; - tableName_ = input.readBytes(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + if (hasTableName()) { + subBuilder.mergeFrom(getTableName()); + } + input.readMessage(subBuilder, extensionRegistry); + 
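On the wire the field keeps tag 1 but is now written with writeMessage instead of writeBytes, and mergeFrom routes through mergeTableName, so an incoming TableName is merged into any existing one field by field rather than overwriting it wholesale. The conversion from the client-side org.apache.hadoop.hbase.TableName object into this protobuf message amounts to the following; the helper name is illustrative, not necessarily the one the patch itself adds:

    // Illustrative glue code only, assuming the proto TableName carries namespace/qualifier bytes fields.
    static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName toProtoTableName(
        org.apache.hadoop.hbase.TableName tableName) {
      return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder()
          .setNamespace(com.google.protobuf.ByteString.copyFrom(tableName.getNamespace()))
          .setQualifier(com.google.protobuf.ByteString.copyFrom(tableName.getQualifier()))
          .build();
    }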
setTableName(subBuilder.buildPartial()); break; } } @@ -8709,29 +9098,95 @@ public final class MasterAdminProtos { private int bitField0_; - // required bytes table_name = 1; - private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + // required .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { - return tableName_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } } - public Builder setTableName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - tableName_ = value; - onChanged(); + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; return this; } public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - tableName_ = getDefaultInstance().getTableName(); - onChanged(); return this; } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } // @@protoc_insertion_point(builder_scope:EnableTableRequest) } @@ -9047,9 +9502,10 @@ public final class MasterAdminProtos { public interface DisableTableRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bytes table_name = 1; + // required .TableName table_name = 1; boolean hasTableName(); - com.google.protobuf.ByteString getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); } public static final class DisableTableRequest extends com.google.protobuf.GeneratedMessage @@ -9080,18 +9536,21 @@ public final class MasterAdminProtos { } private int bitField0_; - // required bytes table_name = 1; + // required .TableName table_name = 1; public static final int TABLE_NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString tableName_; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { return tableName_; } private void initFields() { - tableName_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -9102,6 +9561,10 @@ public final class MasterAdminProtos { memoizedIsInitialized = 0; return false; } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -9110,7 +9573,7 @@ public final class MasterAdminProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, tableName_); + output.writeMessage(1, tableName_); } getUnknownFields().writeTo(output); } @@ -9123,7 +9586,7 @@ public final class MasterAdminProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, tableName_); + .computeMessageSize(1, tableName_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -9274,6 +9737,7 @@ public final class MasterAdminProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); } } private static Builder create() { @@ -9282,7 +9746,11 @@ public final class MasterAdminProtos { public Builder clear() { super.clear(); - tableName_ = com.google.protobuf.ByteString.EMPTY; + if (tableNameBuilder_ == null) { + tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -9325,7 +9793,11 @@ public final class MasterAdminProtos { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.tableName_ = tableName_; + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -9343,7 +9815,7 @@ public final class MasterAdminProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest.getDefaultInstance()) return this; if (other.hasTableName()) { - setTableName(other.getTableName()); + mergeTableName(other.getTableName()); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -9354,6 +9826,10 @@ public final class MasterAdminProtos { return false; } + if (!getTableName().isInitialized()) { + + return false; + } return true; } @@ -9381,8 +9857,12 @@ public final class MasterAdminProtos { break; } case 10: { - bitField0_ |= 0x00000001; - tableName_ = input.readBytes(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + if (hasTableName()) { + subBuilder.mergeFrom(getTableName()); + } + input.readMessage(subBuilder, extensionRegistry); + setTableName(subBuilder.buildPartial()); break; } } @@ -9391,29 +9871,95 @@ public final class MasterAdminProtos { private int bitField0_; - // required bytes table_name = 1; - private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + // required .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { - return tableName_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } } - public Builder setTableName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - tableName_ = value; - onChanged(); + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + 
tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; return this; } public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - tableName_ = getDefaultInstance().getTableName(); - onChanged(); return this; } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } // @@protoc_insertion_point(builder_scope:DisableTableRequest) } @@ -9729,9 +10275,10 @@ public final class MasterAdminProtos { public interface ModifyTableRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bytes table_name = 1; + // required .TableName table_name = 1; boolean hasTableName(); - com.google.protobuf.ByteString getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); // required .TableSchema table_schema = 2; boolean hasTableSchema(); @@ -9767,13 +10314,16 @@ public final class MasterAdminProtos { } private int bitField0_; - // required bytes table_name = 1; + // required .TableName table_name = 1; public static final int TABLE_NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString tableName_; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() 
{ return tableName_; } @@ -9791,7 +10341,7 @@ public final class MasterAdminProtos { } private void initFields() { - tableName_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); } private byte memoizedIsInitialized = -1; @@ -9807,6 +10357,10 @@ public final class MasterAdminProtos { memoizedIsInitialized = 0; return false; } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } if (!getTableSchema().isInitialized()) { memoizedIsInitialized = 0; return false; @@ -9819,7 +10373,7 @@ public final class MasterAdminProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, tableName_); + output.writeMessage(1, tableName_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, tableSchema_); @@ -9835,7 +10389,7 @@ public final class MasterAdminProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, tableName_); + .computeMessageSize(1, tableName_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream @@ -9999,6 +10553,7 @@ public final class MasterAdminProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); getTableSchemaFieldBuilder(); } } @@ -10008,7 +10563,11 @@ public final class MasterAdminProtos { public Builder clear() { super.clear(); - tableName_ = com.google.protobuf.ByteString.EMPTY; + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); if (tableSchemaBuilder_ == null) { tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); @@ -10057,7 +10616,11 @@ public final class MasterAdminProtos { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.tableName_ = tableName_; + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } @@ -10083,7 +10646,7 @@ public final class MasterAdminProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest.getDefaultInstance()) return this; if (other.hasTableName()) { - setTableName(other.getTableName()); + mergeTableName(other.getTableName()); } if (other.hasTableSchema()) { mergeTableSchema(other.getTableSchema()); @@ -10101,6 +10664,10 @@ public final class MasterAdminProtos { return false; } + if (!getTableName().isInitialized()) { + + return false; + } if (!getTableSchema().isInitialized()) { return false; @@ -10132,8 +10699,12 @@ public final class MasterAdminProtos { break; } case 10: { - bitField0_ |= 0x00000001; - tableName_ = input.readBytes(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + if (hasTableName()) { + 
subBuilder.mergeFrom(getTableName()); + } + input.readMessage(subBuilder, extensionRegistry); + setTableName(subBuilder.buildPartial()); break; } case 18: { @@ -10151,29 +10722,95 @@ public final class MasterAdminProtos { private int bitField0_; - // required bytes table_name = 1; - private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + // required .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { - return tableName_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } } - public Builder setTableName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - tableName_ = value; - onChanged(); + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; return this; } public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - tableName_ = getDefaultInstance().getTableName(); - onChanged(); return this; } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } // required .TableSchema table_schema = 2; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); @@ -10347,12 +10984,4819 @@ public final class MasterAdminProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ModifyTableResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ModifyTableResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.getDefaultInstance(); + } + + public 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:ModifyTableResponse) + } + + static { + defaultInstance = new ModifyTableResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ModifyTableResponse) + } + + public interface CreateNamespaceRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .NamespaceDescriptor namespaceDescriptor = 1; + boolean hasNamespaceDescriptor(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder(); + } + public static final class CreateNamespaceRequest extends + com.google.protobuf.GeneratedMessage + implements CreateNamespaceRequestOrBuilder { + // Use CreateNamespaceRequest.newBuilder() to construct. 
+ private CreateNamespaceRequest(Builder builder) { + super(builder); + } + private CreateNamespaceRequest(boolean noInit) {} + + private static final CreateNamespaceRequest defaultInstance; + public static CreateNamespaceRequest getDefaultInstance() { + return defaultInstance; + } + + public CreateNamespaceRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CreateNamespaceRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CreateNamespaceRequest_fieldAccessorTable; + } + + private int bitField0_; + // required .NamespaceDescriptor namespaceDescriptor = 1; + public static final int NAMESPACEDESCRIPTOR_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor namespaceDescriptor_; + public boolean hasNamespaceDescriptor() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor() { + return namespaceDescriptor_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder() { + return namespaceDescriptor_; + } + + private void initFields() { + namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasNamespaceDescriptor()) { + memoizedIsInitialized = 0; + return false; + } + if (!getNamespaceDescriptor().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, namespaceDescriptor_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, namespaceDescriptor_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest) obj; + + boolean result = true; + result = result && (hasNamespaceDescriptor() == other.hasNamespaceDescriptor()); + if (hasNamespaceDescriptor()) { + result = 
result && getNamespaceDescriptor() + .equals(other.getNamespaceDescriptor()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasNamespaceDescriptor()) { + hash = (37 * hash) + NAMESPACEDESCRIPTOR_FIELD_NUMBER; + hash = (53 * hash) + getNamespaceDescriptor().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + 
.buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CreateNamespaceRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CreateNamespaceRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getNamespaceDescriptorFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance(); + } else { + namespaceDescriptorBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest result = new 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (namespaceDescriptorBuilder_ == null) { + result.namespaceDescriptor_ = namespaceDescriptor_; + } else { + result.namespaceDescriptor_ = namespaceDescriptorBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest.getDefaultInstance()) return this; + if (other.hasNamespaceDescriptor()) { + mergeNamespaceDescriptor(other.getNamespaceDescriptor()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasNamespaceDescriptor()) { + + return false; + } + if (!getNamespaceDescriptor().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.newBuilder(); + if (hasNamespaceDescriptor()) { + subBuilder.mergeFrom(getNamespaceDescriptor()); + } + input.readMessage(subBuilder, extensionRegistry); + setNamespaceDescriptor(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .NamespaceDescriptor namespaceDescriptor = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder> namespaceDescriptorBuilder_; + public boolean hasNamespaceDescriptor() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor() { + if (namespaceDescriptorBuilder_ == null) { + return namespaceDescriptor_; + } else { + return namespaceDescriptorBuilder_.getMessage(); + } + } + public Builder 
setNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) { + if (namespaceDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + namespaceDescriptor_ = value; + onChanged(); + } else { + namespaceDescriptorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setNamespaceDescriptor( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder builderForValue) { + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptor_ = builderForValue.build(); + onChanged(); + } else { + namespaceDescriptorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) { + if (namespaceDescriptorBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + namespaceDescriptor_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance()) { + namespaceDescriptor_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.newBuilder(namespaceDescriptor_).mergeFrom(value).buildPartial(); + } else { + namespaceDescriptor_ = value; + } + onChanged(); + } else { + namespaceDescriptorBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearNamespaceDescriptor() { + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance(); + onChanged(); + } else { + namespaceDescriptorBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder getNamespaceDescriptorBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getNamespaceDescriptorFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder() { + if (namespaceDescriptorBuilder_ != null) { + return namespaceDescriptorBuilder_.getMessageOrBuilder(); + } else { + return namespaceDescriptor_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder> + getNamespaceDescriptorFieldBuilder() { + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder>( + namespaceDescriptor_, + getParentForChildren(), + isClean()); + namespaceDescriptor_ = null; + } + return namespaceDescriptorBuilder_; + } + + // @@protoc_insertion_point(builder_scope:CreateNamespaceRequest) + } + + static { + defaultInstance = new CreateNamespaceRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:CreateNamespaceRequest) + } + + public interface CreateNamespaceResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class 
CreateNamespaceResponse extends + com.google.protobuf.GeneratedMessage + implements CreateNamespaceResponseOrBuilder { + // Use CreateNamespaceResponse.newBuilder() to construct. + private CreateNamespaceResponse(Builder builder) { + super(builder); + } + private CreateNamespaceResponse(boolean noInit) {} + + private static final CreateNamespaceResponse defaultInstance; + public static CreateNamespaceResponse getDefaultInstance() { + return defaultInstance; + } + + public CreateNamespaceResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CreateNamespaceResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CreateNamespaceResponse_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + 
public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CreateNamespaceResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_CreateNamespaceResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse.newBuilder() + private Builder() { + 
maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:CreateNamespaceResponse) + } + + static { + defaultInstance = new 
CreateNamespaceResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:CreateNamespaceResponse) + } + + public interface DeleteNamespaceRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string namespaceName = 1; + boolean hasNamespaceName(); + String getNamespaceName(); + } + public static final class DeleteNamespaceRequest extends + com.google.protobuf.GeneratedMessage + implements DeleteNamespaceRequestOrBuilder { + // Use DeleteNamespaceRequest.newBuilder() to construct. + private DeleteNamespaceRequest(Builder builder) { + super(builder); + } + private DeleteNamespaceRequest(boolean noInit) {} + + private static final DeleteNamespaceRequest defaultInstance; + public static DeleteNamespaceRequest getDefaultInstance() { + return defaultInstance; + } + + public DeleteNamespaceRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteNamespaceRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteNamespaceRequest_fieldAccessorTable; + } + + private int bitField0_; + // required string namespaceName = 1; + public static final int NAMESPACENAME_FIELD_NUMBER = 1; + private java.lang.Object namespaceName_; + public boolean hasNamespaceName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getNamespaceName() { + java.lang.Object ref = namespaceName_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + namespaceName_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getNamespaceNameBytes() { + java.lang.Object ref = namespaceName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + namespaceName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + namespaceName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasNamespaceName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNamespaceNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNamespaceNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + 
return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest) obj; + + boolean result = true; + result = result && (hasNamespaceName() == other.hasNamespaceName()); + if (hasNamespaceName()) { + result = result && getNamespaceName() + .equals(other.getNamespaceName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasNamespaceName()) { + hash = (37 * hash) + NAMESPACENAME_FIELD_NUMBER; + hash = (53 * hash) + getNamespaceName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else 
{ + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteNamespaceRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteNamespaceRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + namespaceName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.namespaceName_ = namespaceName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest.getDefaultInstance()) return this; + if (other.hasNamespaceName()) { + setNamespaceName(other.getNamespaceName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasNamespaceName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + namespaceName_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required string namespaceName = 1; + private java.lang.Object namespaceName_ = ""; + public boolean hasNamespaceName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getNamespaceName() { + java.lang.Object ref = namespaceName_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + namespaceName_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setNamespaceName(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + namespaceName_ = value; + onChanged(); + return this; + } + public Builder clearNamespaceName() { + bitField0_ = (bitField0_ & ~0x00000001); + namespaceName_ = getDefaultInstance().getNamespaceName(); + onChanged(); + return this; + } + void setNamespaceName(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + namespaceName_ = value; + onChanged(); + } + + // @@protoc_insertion_point(builder_scope:DeleteNamespaceRequest) + } + + static { + defaultInstance = new DeleteNamespaceRequest(true); + defaultInstance.initFields(); + } + + // 
@@protoc_insertion_point(class_scope:DeleteNamespaceRequest) + } + + public interface DeleteNamespaceResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class DeleteNamespaceResponse extends + com.google.protobuf.GeneratedMessage + implements DeleteNamespaceResponseOrBuilder { + // Use DeleteNamespaceResponse.newBuilder() to construct. + private DeleteNamespaceResponse(Builder builder) { + super(builder); + } + private DeleteNamespaceResponse(boolean noInit) {} + + private static final DeleteNamespaceResponse defaultInstance; + public static DeleteNamespaceResponse getDefaultInstance() { + return defaultInstance; + } + + public DeleteNamespaceResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteNamespaceResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteNamespaceResponse_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteNamespaceResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DeleteNamespaceResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + 
extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:DeleteNamespaceResponse) + } + + static { + defaultInstance = new DeleteNamespaceResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:DeleteNamespaceResponse) + } + + public interface ModifyNamespaceRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .NamespaceDescriptor namespaceDescriptor = 1; + boolean hasNamespaceDescriptor(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder(); + } + public static final class ModifyNamespaceRequest extends + com.google.protobuf.GeneratedMessage + implements ModifyNamespaceRequestOrBuilder { + // Use ModifyNamespaceRequest.newBuilder() to construct. + private ModifyNamespaceRequest(Builder builder) { + super(builder); + } + private ModifyNamespaceRequest(boolean noInit) {} + + private static final ModifyNamespaceRequest defaultInstance; + public static ModifyNamespaceRequest getDefaultInstance() { + return defaultInstance; + } + + public ModifyNamespaceRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ModifyNamespaceRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ModifyNamespaceRequest_fieldAccessorTable; + } + + private int bitField0_; + // required .NamespaceDescriptor namespaceDescriptor = 1; + public static final int NAMESPACEDESCRIPTOR_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor namespaceDescriptor_; + public boolean hasNamespaceDescriptor() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor() { + return namespaceDescriptor_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder() { + return namespaceDescriptor_; + } + + private void initFields() { + namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasNamespaceDescriptor()) { + memoizedIsInitialized = 0; + return false; + } + if (!getNamespaceDescriptor().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, namespaceDescriptor_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 
0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, namespaceDescriptor_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest) obj; + + boolean result = true; + result = result && (hasNamespaceDescriptor() == other.hasNamespaceDescriptor()); + if (hasNamespaceDescriptor()) { + result = result && getNamespaceDescriptor() + .equals(other.getNamespaceDescriptor()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasNamespaceDescriptor()) { + hash = (37 * hash) + NAMESPACEDESCRIPTOR_FIELD_NUMBER; + hash = (53 * hash) + getNamespaceDescriptor().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + 
return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ModifyNamespaceRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ModifyNamespaceRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getNamespaceDescriptorFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance(); + } else { + namespaceDescriptorBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest getDefaultInstanceForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (namespaceDescriptorBuilder_ == null) { + result.namespaceDescriptor_ = namespaceDescriptor_; + } else { + result.namespaceDescriptor_ = namespaceDescriptorBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest.getDefaultInstance()) return this; + if (other.hasNamespaceDescriptor()) { + mergeNamespaceDescriptor(other.getNamespaceDescriptor()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasNamespaceDescriptor()) { + + return false; + } + if (!getNamespaceDescriptor().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.newBuilder(); + if (hasNamespaceDescriptor()) { + subBuilder.mergeFrom(getNamespaceDescriptor()); + } + input.readMessage(subBuilder, extensionRegistry); + 
setNamespaceDescriptor(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .NamespaceDescriptor namespaceDescriptor = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder> namespaceDescriptorBuilder_; + public boolean hasNamespaceDescriptor() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor() { + if (namespaceDescriptorBuilder_ == null) { + return namespaceDescriptor_; + } else { + return namespaceDescriptorBuilder_.getMessage(); + } + } + public Builder setNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) { + if (namespaceDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + namespaceDescriptor_ = value; + onChanged(); + } else { + namespaceDescriptorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setNamespaceDescriptor( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder builderForValue) { + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptor_ = builderForValue.build(); + onChanged(); + } else { + namespaceDescriptorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) { + if (namespaceDescriptorBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + namespaceDescriptor_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance()) { + namespaceDescriptor_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.newBuilder(namespaceDescriptor_).mergeFrom(value).buildPartial(); + } else { + namespaceDescriptor_ = value; + } + onChanged(); + } else { + namespaceDescriptorBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearNamespaceDescriptor() { + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance(); + onChanged(); + } else { + namespaceDescriptorBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder getNamespaceDescriptorBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getNamespaceDescriptorFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder() { + if (namespaceDescriptorBuilder_ != null) { + return namespaceDescriptorBuilder_.getMessageOrBuilder(); + } else { + return namespaceDescriptor_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder> + getNamespaceDescriptorFieldBuilder() { + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder>( + namespaceDescriptor_, + getParentForChildren(), + isClean()); + namespaceDescriptor_ = null; + } + return namespaceDescriptorBuilder_; + } + + // @@protoc_insertion_point(builder_scope:ModifyNamespaceRequest) + } + + static { + defaultInstance = new ModifyNamespaceRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ModifyNamespaceRequest) + } + + public interface GetNamespaceDescriptorRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string namespaceName = 1; + boolean hasNamespaceName(); + String getNamespaceName(); + } + public static final class GetNamespaceDescriptorRequest extends + com.google.protobuf.GeneratedMessage + implements GetNamespaceDescriptorRequestOrBuilder { + // Use GetNamespaceDescriptorRequest.newBuilder() to construct. + private GetNamespaceDescriptorRequest(Builder builder) { + super(builder); + } + private GetNamespaceDescriptorRequest(boolean noInit) {} + + private static final GetNamespaceDescriptorRequest defaultInstance; + public static GetNamespaceDescriptorRequest getDefaultInstance() { + return defaultInstance; + } + + public GetNamespaceDescriptorRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetNamespaceDescriptorRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetNamespaceDescriptorRequest_fieldAccessorTable; + } + + private int bitField0_; + // required string namespaceName = 1; + public static final int NAMESPACENAME_FIELD_NUMBER = 1; + private java.lang.Object namespaceName_; + public boolean hasNamespaceName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getNamespaceName() { + java.lang.Object ref = namespaceName_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + namespaceName_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getNamespaceNameBytes() { + java.lang.Object ref = namespaceName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + namespaceName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + namespaceName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasNamespaceName()) { + memoizedIsInitialized = 0; + return 
false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNamespaceNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNamespaceNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest) obj; + + boolean result = true; + result = result && (hasNamespaceName() == other.hasNamespaceName()); + if (hasNamespaceName()) { + result = result && getNamespaceName() + .equals(other.getNamespaceName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasNamespaceName()) { + hash = (37 * hash) + NAMESPACENAME_FIELD_NUMBER; + hash = (53 * hash) + getNamespaceName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetNamespaceDescriptorRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetNamespaceDescriptorRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + namespaceName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return 
create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.namespaceName_ = namespaceName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest.getDefaultInstance()) return this; + if (other.hasNamespaceName()) { + setNamespaceName(other.getNamespaceName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasNamespaceName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + namespaceName_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // 
required string namespaceName = 1; + private java.lang.Object namespaceName_ = ""; + public boolean hasNamespaceName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getNamespaceName() { + java.lang.Object ref = namespaceName_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + namespaceName_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setNamespaceName(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + namespaceName_ = value; + onChanged(); + return this; + } + public Builder clearNamespaceName() { + bitField0_ = (bitField0_ & ~0x00000001); + namespaceName_ = getDefaultInstance().getNamespaceName(); + onChanged(); + return this; + } + void setNamespaceName(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + namespaceName_ = value; + onChanged(); + } + + // @@protoc_insertion_point(builder_scope:GetNamespaceDescriptorRequest) + } + + static { + defaultInstance = new GetNamespaceDescriptorRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetNamespaceDescriptorRequest) + } + + public interface GetNamespaceDescriptorResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .NamespaceDescriptor namespaceDescriptor = 1; + boolean hasNamespaceDescriptor(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder(); + } + public static final class GetNamespaceDescriptorResponse extends + com.google.protobuf.GeneratedMessage + implements GetNamespaceDescriptorResponseOrBuilder { + // Use GetNamespaceDescriptorResponse.newBuilder() to construct. 
+ private GetNamespaceDescriptorResponse(Builder builder) { + super(builder); + } + private GetNamespaceDescriptorResponse(boolean noInit) {} + + private static final GetNamespaceDescriptorResponse defaultInstance; + public static GetNamespaceDescriptorResponse getDefaultInstance() { + return defaultInstance; + } + + public GetNamespaceDescriptorResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetNamespaceDescriptorResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetNamespaceDescriptorResponse_fieldAccessorTable; + } + + private int bitField0_; + // required .NamespaceDescriptor namespaceDescriptor = 1; + public static final int NAMESPACEDESCRIPTOR_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor namespaceDescriptor_; + public boolean hasNamespaceDescriptor() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor() { + return namespaceDescriptor_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder() { + return namespaceDescriptor_; + } + + private void initFields() { + namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasNamespaceDescriptor()) { + memoizedIsInitialized = 0; + return false; + } + if (!getNamespaceDescriptor().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, namespaceDescriptor_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, namespaceDescriptor_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse) obj; + + boolean result = true; + result = result && (hasNamespaceDescriptor() == 
other.hasNamespaceDescriptor()); + if (hasNamespaceDescriptor()) { + result = result && getNamespaceDescriptor() + .equals(other.getNamespaceDescriptor()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasNamespaceDescriptor()) { + hash = (37 * hash) + NAMESPACEDESCRIPTOR_FIELD_NUMBER; + hash = (53 * hash) + getNamespaceDescriptor().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse parseFrom( + com.google.protobuf.CodedInputStream input, 
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetNamespaceDescriptorResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetNamespaceDescriptorResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getNamespaceDescriptorFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance(); + } else { + namespaceDescriptorBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + 
result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (namespaceDescriptorBuilder_ == null) { + result.namespaceDescriptor_ = namespaceDescriptor_; + } else { + result.namespaceDescriptor_ = namespaceDescriptorBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse.getDefaultInstance()) return this; + if (other.hasNamespaceDescriptor()) { + mergeNamespaceDescriptor(other.getNamespaceDescriptor()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasNamespaceDescriptor()) { + + return false; + } + if (!getNamespaceDescriptor().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.newBuilder(); + if (hasNamespaceDescriptor()) { + subBuilder.mergeFrom(getNamespaceDescriptor()); + } + input.readMessage(subBuilder, extensionRegistry); + setNamespaceDescriptor(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // required .NamespaceDescriptor namespaceDescriptor = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder> namespaceDescriptorBuilder_; + public boolean hasNamespaceDescriptor() { + return ((bitField0_ 
& 0x00000001) == 0x00000001); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor() { + if (namespaceDescriptorBuilder_ == null) { + return namespaceDescriptor_; + } else { + return namespaceDescriptorBuilder_.getMessage(); + } + } + public Builder setNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) { + if (namespaceDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + namespaceDescriptor_ = value; + onChanged(); + } else { + namespaceDescriptorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setNamespaceDescriptor( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder builderForValue) { + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptor_ = builderForValue.build(); + onChanged(); + } else { + namespaceDescriptorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) { + if (namespaceDescriptorBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + namespaceDescriptor_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance()) { + namespaceDescriptor_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.newBuilder(namespaceDescriptor_).mergeFrom(value).buildPartial(); + } else { + namespaceDescriptor_ = value; + } + onChanged(); + } else { + namespaceDescriptorBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder clearNamespaceDescriptor() { + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptor_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance(); + onChanged(); + } else { + namespaceDescriptorBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder getNamespaceDescriptorBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getNamespaceDescriptorFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder() { + if (namespaceDescriptorBuilder_ != null) { + return namespaceDescriptorBuilder_.getMessageOrBuilder(); + } else { + return namespaceDescriptor_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder> + getNamespaceDescriptorFieldBuilder() { + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder>( + namespaceDescriptor_, + getParentForChildren(), + isClean()); + namespaceDescriptor_ = null; + } + return namespaceDescriptorBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetNamespaceDescriptorResponse) + } + + static 
{ + defaultInstance = new GetNamespaceDescriptorResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetNamespaceDescriptorResponse) + } + + public interface ModifyNamespaceResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class ModifyNamespaceResponse extends + com.google.protobuf.GeneratedMessage + implements ModifyNamespaceResponseOrBuilder { + // Use ModifyNamespaceResponse.newBuilder() to construct. + private ModifyNamespaceResponse(Builder builder) { + super(builder); + } + private ModifyNamespaceResponse(boolean noInit) {} + + private static final ModifyNamespaceResponse defaultInstance; + public static ModifyNamespaceResponse getDefaultInstance() { + return defaultInstance; + } + + public ModifyNamespaceResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ModifyNamespaceResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ModifyNamespaceResponse_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ModifyNamespaceResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ModifyNamespaceResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if 
(!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:ModifyNamespaceResponse) + } + + static { + defaultInstance = new ModifyNamespaceResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ModifyNamespaceResponse) + } + + public interface ListNamespaceDescriptorsRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + public static final class ListNamespaceDescriptorsRequest extends + com.google.protobuf.GeneratedMessage + implements ListNamespaceDescriptorsRequestOrBuilder { + // Use ListNamespaceDescriptorsRequest.newBuilder() to construct. + private ListNamespaceDescriptorsRequest(Builder builder) { + super(builder); + } + private ListNamespaceDescriptorsRequest(boolean noInit) {} + + private static final ListNamespaceDescriptorsRequest defaultInstance; + public static ListNamespaceDescriptorsRequest getDefaultInstance() { + return defaultInstance; + } + + public ListNamespaceDescriptorsRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListNamespaceDescriptorsRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListNamespaceDescriptorsRequest_fieldAccessorTable; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest parseFrom( + com.google.protobuf.ByteString data) + throws 
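// --- Editor's sketch, not part of the patch: exercising the ModifyNamespaceResponse parse entry
// points generated above. Assumptions: only the class and methods shown in this diff plus the
// standard protobuf-java runtime (toByteArray, writeDelimitedTo) are used; the RPC plumbing that
// would normally carry this response is out of scope.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse;

public class ModifyNamespaceResponseSketch {
  public static void main(String[] args) throws Exception {
    // The message carries no fields, so a freshly built instance is always initialized.
    ModifyNamespaceResponse built = ModifyNamespaceResponse.newBuilder().build();

    // Plain byte[] round trip through the generated parseFrom(byte[]) shown above.
    ModifyNamespaceResponse reparsed = ModifyNamespaceResponse.parseFrom(built.toByteArray());
    System.out.println("initialized: " + reparsed.isInitialized());

    // Length-delimited framing: the generated parseDelimitedFrom(...) returns null once the
    // stream is drained, matching the null-return branch in the code above.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    built.writeDelimitedTo(out);
    built.writeDelimitedTo(out);
    ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
    int count = 0;
    while (ModifyNamespaceResponse.parseDelimitedFrom(in) != null) {
      count++;
    }
    System.out.println("messages read: " + count); // 2
  }
}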
com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class 
Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListNamespaceDescriptorsRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListNamespaceDescriptorsRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest.getDefaultInstance()) return this; + 
this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + } + } + } + + + // @@protoc_insertion_point(builder_scope:ListNamespaceDescriptorsRequest) + } + + static { + defaultInstance = new ListNamespaceDescriptorsRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ListNamespaceDescriptorsRequest) + } + + public interface ListNamespaceDescriptorsResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .NamespaceDescriptor namespaceDescriptor = 1; + java.util.List + getNamespaceDescriptorList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor(int index); + int getNamespaceDescriptorCount(); + java.util.List + getNamespaceDescriptorOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder( + int index); + } + public static final class ListNamespaceDescriptorsResponse extends + com.google.protobuf.GeneratedMessage + implements ListNamespaceDescriptorsResponseOrBuilder { + // Use ListNamespaceDescriptorsResponse.newBuilder() to construct. 
+ private ListNamespaceDescriptorsResponse(Builder builder) { + super(builder); + } + private ListNamespaceDescriptorsResponse(boolean noInit) {} + + private static final ListNamespaceDescriptorsResponse defaultInstance; + public static ListNamespaceDescriptorsResponse getDefaultInstance() { + return defaultInstance; + } + + public ListNamespaceDescriptorsResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListNamespaceDescriptorsResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListNamespaceDescriptorsResponse_fieldAccessorTable; + } + + // repeated .NamespaceDescriptor namespaceDescriptor = 1; + public static final int NAMESPACEDESCRIPTOR_FIELD_NUMBER = 1; + private java.util.List namespaceDescriptor_; + public java.util.List getNamespaceDescriptorList() { + return namespaceDescriptor_; + } + public java.util.List + getNamespaceDescriptorOrBuilderList() { + return namespaceDescriptor_; + } + public int getNamespaceDescriptorCount() { + return namespaceDescriptor_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor(int index) { + return namespaceDescriptor_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder( + int index) { + return namespaceDescriptor_.get(index); + } + + private void initFields() { + namespaceDescriptor_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getNamespaceDescriptorCount(); i++) { + if (!getNamespaceDescriptor(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < namespaceDescriptor_.size(); i++) { + output.writeMessage(1, namespaceDescriptor_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < namespaceDescriptor_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, namespaceDescriptor_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse other = 
(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse) obj; + + boolean result = true; + result = result && getNamespaceDescriptorList() + .equals(other.getNamespaceDescriptorList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getNamespaceDescriptorCount() > 0) { + hash = (37 * hash) + NAMESPACEDESCRIPTOR_FIELD_NUMBER; + hash = (53 * hash) + getNamespaceDescriptorList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListNamespaceDescriptorsResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ListNamespaceDescriptorsResponse_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getNamespaceDescriptorFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptor_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + namespaceDescriptorBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse result = 
buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse(this); + int from_bitField0_ = bitField0_; + if (namespaceDescriptorBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + namespaceDescriptor_ = java.util.Collections.unmodifiableList(namespaceDescriptor_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.namespaceDescriptor_ = namespaceDescriptor_; + } else { + result.namespaceDescriptor_ = namespaceDescriptorBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse.getDefaultInstance()) return this; + if (namespaceDescriptorBuilder_ == null) { + if (!other.namespaceDescriptor_.isEmpty()) { + if (namespaceDescriptor_.isEmpty()) { + namespaceDescriptor_ = other.namespaceDescriptor_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureNamespaceDescriptorIsMutable(); + namespaceDescriptor_.addAll(other.namespaceDescriptor_); + } + onChanged(); + } + } else { + if (!other.namespaceDescriptor_.isEmpty()) { + if (namespaceDescriptorBuilder_.isEmpty()) { + namespaceDescriptorBuilder_.dispose(); + namespaceDescriptorBuilder_ = null; + namespaceDescriptor_ = other.namespaceDescriptor_; + bitField0_ = (bitField0_ & ~0x00000001); + namespaceDescriptorBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getNamespaceDescriptorFieldBuilder() : null; + } else { + namespaceDescriptorBuilder_.addAllMessages(other.namespaceDescriptor_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getNamespaceDescriptorCount(); i++) { + if (!getNamespaceDescriptor(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addNamespaceDescriptor(subBuilder.buildPartial()); + break; + } + } + } + } + + private int bitField0_; + + // repeated .NamespaceDescriptor namespaceDescriptor = 1; + private java.util.List namespaceDescriptor_ = + java.util.Collections.emptyList(); + private void ensureNamespaceDescriptorIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + namespaceDescriptor_ = new java.util.ArrayList(namespaceDescriptor_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder> namespaceDescriptorBuilder_; + + public java.util.List getNamespaceDescriptorList() { + if (namespaceDescriptorBuilder_ == null) { + return java.util.Collections.unmodifiableList(namespaceDescriptor_); + } else { + return namespaceDescriptorBuilder_.getMessageList(); + } + } + public int getNamespaceDescriptorCount() { + if (namespaceDescriptorBuilder_ == null) { + return namespaceDescriptor_.size(); + } else { + return namespaceDescriptorBuilder_.getCount(); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor getNamespaceDescriptor(int index) { + if (namespaceDescriptorBuilder_ == null) { + return namespaceDescriptor_.get(index); + } else { + return namespaceDescriptorBuilder_.getMessage(index); + } + } + public Builder setNamespaceDescriptor( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) { + if (namespaceDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNamespaceDescriptorIsMutable(); + namespaceDescriptor_.set(index, value); + onChanged(); + } else { + namespaceDescriptorBuilder_.setMessage(index, value); + } + return this; + } + public Builder setNamespaceDescriptor( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder builderForValue) { + if (namespaceDescriptorBuilder_ == null) { + ensureNamespaceDescriptorIsMutable(); + namespaceDescriptor_.set(index, 
builderForValue.build()); + onChanged(); + } else { + namespaceDescriptorBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addNamespaceDescriptor(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) { + if (namespaceDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNamespaceDescriptorIsMutable(); + namespaceDescriptor_.add(value); + onChanged(); + } else { + namespaceDescriptorBuilder_.addMessage(value); + } + return this; + } + public Builder addNamespaceDescriptor( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor value) { + if (namespaceDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNamespaceDescriptorIsMutable(); + namespaceDescriptor_.add(index, value); + onChanged(); + } else { + namespaceDescriptorBuilder_.addMessage(index, value); + } + return this; + } + public Builder addNamespaceDescriptor( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder builderForValue) { + if (namespaceDescriptorBuilder_ == null) { + ensureNamespaceDescriptorIsMutable(); + namespaceDescriptor_.add(builderForValue.build()); + onChanged(); + } else { + namespaceDescriptorBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addNamespaceDescriptor( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder builderForValue) { + if (namespaceDescriptorBuilder_ == null) { + ensureNamespaceDescriptorIsMutable(); + namespaceDescriptor_.add(index, builderForValue.build()); + onChanged(); + } else { + namespaceDescriptorBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllNamespaceDescriptor( + java.lang.Iterable values) { + if (namespaceDescriptorBuilder_ == null) { + ensureNamespaceDescriptorIsMutable(); + super.addAll(values, namespaceDescriptor_); + onChanged(); + } else { + namespaceDescriptorBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearNamespaceDescriptor() { + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptor_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + namespaceDescriptorBuilder_.clear(); + } + return this; + } + public Builder removeNamespaceDescriptor(int index) { + if (namespaceDescriptorBuilder_ == null) { + ensureNamespaceDescriptorIsMutable(); + namespaceDescriptor_.remove(index); + onChanged(); + } else { + namespaceDescriptorBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder getNamespaceDescriptorBuilder( + int index) { + return getNamespaceDescriptorFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder getNamespaceDescriptorOrBuilder( + int index) { + if (namespaceDescriptorBuilder_ == null) { + return namespaceDescriptor_.get(index); } else { + return namespaceDescriptorBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getNamespaceDescriptorOrBuilderList() { + if (namespaceDescriptorBuilder_ != null) { + return namespaceDescriptorBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(namespaceDescriptor_); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder 
addNamespaceDescriptorBuilder() { + return getNamespaceDescriptorFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance()); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder addNamespaceDescriptorBuilder( + int index) { + return getNamespaceDescriptorFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.getDefaultInstance()); + } + public java.util.List + getNamespaceDescriptorBuilderList() { + return getNamespaceDescriptorFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder> + getNamespaceDescriptorFieldBuilder() { + if (namespaceDescriptorBuilder_ == null) { + namespaceDescriptorBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptorOrBuilder>( + namespaceDescriptor_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + namespaceDescriptor_ = null; + } + return namespaceDescriptorBuilder_; + } + + // @@protoc_insertion_point(builder_scope:ListNamespaceDescriptorsResponse) + } + + static { + defaultInstance = new ListNamespaceDescriptorsResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ListNamespaceDescriptorsResponse) + } + + public interface GetTableDescriptorsByNamespaceRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string namespaceName = 1; + boolean hasNamespaceName(); + String getNamespaceName(); + } + public static final class GetTableDescriptorsByNamespaceRequest extends + com.google.protobuf.GeneratedMessage + implements GetTableDescriptorsByNamespaceRequestOrBuilder { + // Use GetTableDescriptorsByNamespaceRequest.newBuilder() to construct. 
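// --- Editor's sketch, not part of the patch: client-side view of the ListNamespaceDescriptors
// request/response pair generated above. Assumptions: 'exampleResponseBytes' merely stands in
// for bytes answered by the master; NamespaceDescriptor is defined in HBaseProtos and is only
// printed here, so none of its fields are assumed.
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NamespaceDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse;

public class ListNamespacesSketch {
  public static void main(String[] args) throws Exception {
    // The request declares no fields; an empty builder is all a caller needs.
    ListNamespaceDescriptorsRequest request = ListNamespaceDescriptorsRequest.newBuilder().build();
    System.out.println("request bytes: " + request.getSerializedSize()); // 0

    // Pretend these bytes came back from the master RPC (here: an empty response).
    byte[] exampleResponseBytes = ListNamespaceDescriptorsResponse.getDefaultInstance().toByteArray();
    ListNamespaceDescriptorsResponse response =
        ListNamespaceDescriptorsResponse.parseFrom(exampleResponseBytes);

    // The repeated namespaceDescriptor field maps onto the usual List accessors shown above.
    for (NamespaceDescriptor ns : response.getNamespaceDescriptorList()) {
      System.out.println(ns);
    }
    System.out.println("descriptors: " + response.getNamespaceDescriptorCount());
  }
}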
+ private GetTableDescriptorsByNamespaceRequest(Builder builder) { + super(builder); + } + private GetTableDescriptorsByNamespaceRequest(boolean noInit) {} + + private static final GetTableDescriptorsByNamespaceRequest defaultInstance; + public static GetTableDescriptorsByNamespaceRequest getDefaultInstance() { + return defaultInstance; + } + + public GetTableDescriptorsByNamespaceRequest getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetTableDescriptorsByNamespaceRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetTableDescriptorsByNamespaceRequest_fieldAccessorTable; + } + + private int bitField0_; + // required string namespaceName = 1; + public static final int NAMESPACENAME_FIELD_NUMBER = 1; + private java.lang.Object namespaceName_; + public boolean hasNamespaceName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getNamespaceName() { + java.lang.Object ref = namespaceName_; + if (ref instanceof String) { + return (String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + String s = bs.toStringUtf8(); + if (com.google.protobuf.Internal.isValidUtf8(bs)) { + namespaceName_ = s; + } + return s; + } + } + private com.google.protobuf.ByteString getNamespaceNameBytes() { + java.lang.Object ref = namespaceName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((String) ref); + namespaceName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + namespaceName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasNamespaceName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNamespaceNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNamespaceNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest other = 
(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest) obj; + + boolean result = true; + result = result && (hasNamespaceName() == other.hasNamespaceName()); + if (hasNamespaceName()) { + result = result && getNamespaceName() + .equals(other.getNamespaceName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + @java.lang.Override + public int hashCode() { + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasNamespaceName()) { + hash = (37 * hash) + NAMESPACENAME_FIELD_NUMBER; + hash = (53 * hash) + getNamespaceName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return newBuilder().mergeFrom(data, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Builder builder = newBuilder(); + if (builder.mergeDelimitedFrom(input, extensionRegistry)) { + return builder.buildParsed(); + } else { + return null; + } + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return 
newBuilder().mergeFrom(input).buildParsed(); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return newBuilder().mergeFrom(input, extensionRegistry) + .buildParsed(); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetTableDescriptorsByNamespaceRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetTableDescriptorsByNamespaceRequest_fieldAccessorTable; + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + namespaceName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest.getDescriptor(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest buildParsed() + throws com.google.protobuf.InvalidProtocolBufferException { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException( + result).asInvalidProtocolBufferException(); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.namespaceName_ = namespaceName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest.getDefaultInstance()) return this; + if (other.hasNamespaceName()) { + setNamespaceName(other.getNamespaceName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasNamespaceName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder( + this.getUnknownFields()); + while (true) { + int tag = input.readTag(); + switch (tag) { + case 0: + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + this.setUnknownFields(unknownFields.build()); + onChanged(); + return this; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + namespaceName_ = input.readBytes(); + break; + } + } + } + } + + private int bitField0_; + + // required string namespaceName = 1; + private java.lang.Object namespaceName_ = ""; + public boolean hasNamespaceName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + public String getNamespaceName() { + java.lang.Object ref = namespaceName_; + if (!(ref instanceof String)) { + String s = ((com.google.protobuf.ByteString) ref).toStringUtf8(); + namespaceName_ = s; + return s; + } else { + return (String) ref; + } + } + public Builder setNamespaceName(String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + namespaceName_ = value; + onChanged(); + return this; + } + public Builder clearNamespaceName() { + bitField0_ = (bitField0_ & ~0x00000001); + namespaceName_ = getDefaultInstance().getNamespaceName(); + onChanged(); + return this; + } + void setNamespaceName(com.google.protobuf.ByteString value) { + bitField0_ |= 0x00000001; + namespaceName_ = value; + onChanged(); + } + + // @@protoc_insertion_point(builder_scope:GetTableDescriptorsByNamespaceRequest) + } + + static { + defaultInstance = new GetTableDescriptorsByNamespaceRequest(true); + 
defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetTableDescriptorsByNamespaceRequest) + } + + public interface GetTableDescriptorsByNamespaceResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .TableSchema tableSchema = 1; + java.util.List + getTableSchemaList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema(int index); + int getTableSchemaCount(); + java.util.List + getTableSchemaOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder( + int index); + } + public static final class GetTableDescriptorsByNamespaceResponse extends + com.google.protobuf.GeneratedMessage + implements GetTableDescriptorsByNamespaceResponseOrBuilder { + // Use GetTableDescriptorsByNamespaceResponse.newBuilder() to construct. + private GetTableDescriptorsByNamespaceResponse(Builder builder) { + super(builder); + } + private GetTableDescriptorsByNamespaceResponse(boolean noInit) {} + + private static final GetTableDescriptorsByNamespaceResponse defaultInstance; + public static GetTableDescriptorsByNamespaceResponse getDefaultInstance() { + return defaultInstance; + } + + public GetTableDescriptorsByNamespaceResponse getDefaultInstanceForType() { + return defaultInstance; + } + + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetTableDescriptorsByNamespaceResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetTableDescriptorsByNamespaceResponse_fieldAccessorTable; + } + + // repeated .TableSchema tableSchema = 1; + public static final int TABLESCHEMA_FIELD_NUMBER = 1; + private java.util.List tableSchema_; + public java.util.List getTableSchemaList() { + return tableSchema_; + } + public java.util.List + getTableSchemaOrBuilderList() { + return tableSchema_; + } + public int getTableSchemaCount() { + return tableSchema_.size(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema(int index) { + return tableSchema_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder( + int index) { + return tableSchema_.get(index); + } + + private void initFields() { + tableSchema_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getTableSchemaCount(); i++) { + if (!getTableSchema(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < tableSchema_.size(); i++) { + output.writeMessage(1, tableSchema_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < tableSchema_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableSchema_.get(i)); + } + size += 
getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse) obj; boolean result = true; + result = result && getTableSchemaList() + .equals(other.getTableSchemaList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -10362,45 +15806,49 @@ public final class MasterAdminProtos { public int hashCode() { int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getTableSchemaCount() > 0) { + hash = (37 * hash) + TABLESCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getTableSchemaList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return newBuilder().mergeFrom(data, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - 
public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { Builder builder = newBuilder(); if (builder.mergeDelimitedFrom(input)) { @@ -10409,7 +15857,7 @@ public final class MasterAdminProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -10420,12 +15868,12 @@ public final class MasterAdminProtos { return null; } } - public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return newBuilder().mergeFrom(input).buildParsed(); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -10435,7 +15883,7 @@ public final class MasterAdminProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -10448,18 +15896,18 @@ public final class MasterAdminProtos { } public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ModifyTableResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetTableDescriptorsByNamespaceResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_ModifyTableResponse_fieldAccessorTable; + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_GetTableDescriptorsByNamespaceResponse_fieldAccessorTable; } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -10470,6 +15918,7 @@ public final class MasterAdminProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableSchemaFieldBuilder(); } } private static Builder create() { @@ -10478,6 +15927,12 @@ public final class MasterAdminProtos { public Builder clear() { super.clear(); + if (tableSchemaBuilder_ == null) { + tableSchema_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + tableSchemaBuilder_.clear(); + } return this; } @@ -10487,24 +15942,24 @@ public final class MasterAdminProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.getDescriptor(); + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.getDescriptor(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse buildParsed() + private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { - org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse result = buildPartial(); + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException( result).asInvalidProtocolBufferException(); @@ -10512,28 +15967,70 @@ public final class MasterAdminProtos { return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse result = new 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse(this); + int from_bitField0_ = bitField0_; + if (tableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + tableSchema_ = java.util.Collections.unmodifiableList(tableSchema_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.tableSchema_ = tableSchema_; + } else { + result.tableSchema_ = tableSchemaBuilder_.build(); + } onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.getDefaultInstance()) return this; + if (tableSchemaBuilder_ == null) { + if (!other.tableSchema_.isEmpty()) { + if (tableSchema_.isEmpty()) { + tableSchema_ = other.tableSchema_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTableSchemaIsMutable(); + tableSchema_.addAll(other.tableSchema_); + } + onChanged(); + } + } else { + if (!other.tableSchema_.isEmpty()) { + if (tableSchemaBuilder_.isEmpty()) { + tableSchemaBuilder_.dispose(); + tableSchemaBuilder_ = null; + tableSchema_ = other.tableSchema_; + bitField0_ = (bitField0_ & ~0x00000001); + tableSchemaBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTableSchemaFieldBuilder() : null; + } else { + tableSchemaBuilder_.addAllMessages(other.tableSchema_); + } + } + } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + for (int i = 0; i < getTableSchemaCount(); i++) { + if (!getTableSchema(i).isInitialized()) { + + return false; + } + } return true; } @@ -10560,20 +16057,213 @@ public final class MasterAdminProtos { } break; } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addTableSchema(subBuilder.buildPartial()); + break; + } } } } + private int bitField0_; - // @@protoc_insertion_point(builder_scope:ModifyTableResponse) + // repeated .TableSchema tableSchema = 1; + private java.util.List tableSchema_ = + java.util.Collections.emptyList(); + private void ensureTableSchemaIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + tableSchema_ = new java.util.ArrayList(tableSchema_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableSchemaBuilder_; + + public java.util.List getTableSchemaList() { + if (tableSchemaBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableSchema_); + } else { + return tableSchemaBuilder_.getMessageList(); + } + } + public int getTableSchemaCount() { + if (tableSchemaBuilder_ == null) { + return tableSchema_.size(); + } else { + return tableSchemaBuilder_.getCount(); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema(int index) { + if (tableSchemaBuilder_ == null) { + return tableSchema_.get(index); + } else { + return tableSchemaBuilder_.getMessage(index); + } + } + public Builder setTableSchema( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableSchemaIsMutable(); + tableSchema_.set(index, value); + onChanged(); + } else { + tableSchemaBuilder_.setMessage(index, value); + } + return this; + } + public Builder setTableSchema( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (tableSchemaBuilder_ == null) { + ensureTableSchemaIsMutable(); + tableSchema_.set(index, builderForValue.build()); + onChanged(); + } else { + tableSchemaBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableSchemaIsMutable(); + tableSchema_.add(value); + onChanged(); + } else { + tableSchemaBuilder_.addMessage(value); + } + return this; + } + public Builder addTableSchema( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableSchemaIsMutable(); + tableSchema_.add(index, value); + onChanged(); + } else { + tableSchemaBuilder_.addMessage(index, value); + } + return 
this; + } + public Builder addTableSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (tableSchemaBuilder_ == null) { + ensureTableSchemaIsMutable(); + tableSchema_.add(builderForValue.build()); + onChanged(); + } else { + tableSchemaBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addTableSchema( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (tableSchemaBuilder_ == null) { + ensureTableSchemaIsMutable(); + tableSchema_.add(index, builderForValue.build()); + onChanged(); + } else { + tableSchemaBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + public Builder addAllTableSchema( + java.lang.Iterable values) { + if (tableSchemaBuilder_ == null) { + ensureTableSchemaIsMutable(); + super.addAll(values, tableSchema_); + onChanged(); + } else { + tableSchemaBuilder_.addAllMessages(values); + } + return this; + } + public Builder clearTableSchema() { + if (tableSchemaBuilder_ == null) { + tableSchema_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + tableSchemaBuilder_.clear(); + } + return this; + } + public Builder removeTableSchema(int index) { + if (tableSchemaBuilder_ == null) { + ensureTableSchemaIsMutable(); + tableSchema_.remove(index); + onChanged(); + } else { + tableSchemaBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableSchemaBuilder( + int index) { + return getTableSchemaFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder( + int index) { + if (tableSchemaBuilder_ == null) { + return tableSchema_.get(index); } else { + return tableSchemaBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getTableSchemaOrBuilderList() { + if (tableSchemaBuilder_ != null) { + return tableSchemaBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableSchema_); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder addTableSchemaBuilder() { + return getTableSchemaFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder addTableSchemaBuilder( + int index) { + return getTableSchemaFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()); + } + public java.util.List + getTableSchemaBuilderList() { + return getTableSchemaFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getTableSchemaFieldBuilder() { + if (tableSchemaBuilder_ == null) { + tableSchemaBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + tableSchema_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + 
isClean()); + tableSchema_ = null; + } + return tableSchemaBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetTableDescriptorsByNamespaceResponse) } static { - defaultInstance = new ModifyTableResponse(true); + defaultInstance = new GetTableDescriptorsByNamespaceResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:ModifyTableResponse) + // @@protoc_insertion_point(class_scope:GetTableDescriptorsByNamespaceResponse) } public interface ShutdownRequestOrBuilder @@ -20651,6 +26341,36 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, com.google.protobuf.RpcCallback done); + public abstract void modifyNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void createNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void deleteNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void getNamespaceDescriptor( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void listNamespaceDescriptors( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void getTableDescriptorsByNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -20841,43 +26561,91 @@ public final class MasterAdminProtos { } @java.lang.Override - public void deleteSnapshot( + public void deleteSnapshot( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest request, + com.google.protobuf.RpcCallback done) { + impl.deleteSnapshot(controller, request, done); + } + + @java.lang.Override + public void isSnapshotDone( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest request, + com.google.protobuf.RpcCallback done) { + impl.isSnapshotDone(controller, request, done); + } + + @java.lang.Override + public void restoreSnapshot( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest request, + com.google.protobuf.RpcCallback done) { + impl.restoreSnapshot(controller, request, done); + } + + @java.lang.Override + public void isRestoreSnapshotDone( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest request, + com.google.protobuf.RpcCallback done) { + impl.isRestoreSnapshotDone(controller, request, done); + } + + @java.lang.Override + public void isMasterRunning( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, + com.google.protobuf.RpcCallback done) { + impl.isMasterRunning(controller, request, done); + } + + @java.lang.Override + public void modifyNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest request, + com.google.protobuf.RpcCallback done) { + impl.modifyNamespace(controller, request, done); + } + + @java.lang.Override + public void createNamespace( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest request, - com.google.protobuf.RpcCallback done) { - impl.deleteSnapshot(controller, request, done); + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest request, + com.google.protobuf.RpcCallback done) { + impl.createNamespace(controller, request, done); } @java.lang.Override - public void isSnapshotDone( + public void deleteNamespace( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest request, - com.google.protobuf.RpcCallback done) { - impl.isSnapshotDone(controller, request, done); + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest request, + com.google.protobuf.RpcCallback done) { + impl.deleteNamespace(controller, request, done); } @java.lang.Override - public void restoreSnapshot( + public void getNamespaceDescriptor( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest request, - com.google.protobuf.RpcCallback done) { - impl.restoreSnapshot(controller, request, done); + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest request, + com.google.protobuf.RpcCallback done) { + impl.getNamespaceDescriptor(controller, request, done); } @java.lang.Override - public void isRestoreSnapshotDone( + public void listNamespaceDescriptors( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest request, - com.google.protobuf.RpcCallback done) { - impl.isRestoreSnapshotDone(controller, request, done); + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest request, + com.google.protobuf.RpcCallback done) { + impl.listNamespaceDescriptors(controller, request, done); } @java.lang.Override - public void isMasterRunning( + public void getTableDescriptorsByNamespace( com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, - com.google.protobuf.RpcCallback done) { - impl.isMasterRunning(controller, request, done); + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest request, + com.google.protobuf.RpcCallback done) { + impl.getTableDescriptorsByNamespace(controller, request, done); } }; @@ -20958,6 +26726,18 @@ public final class MasterAdminProtos { return impl.isRestoreSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest)request); case 27: return impl.isMasterRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)request); + case 28: + return impl.modifyNamespace(controller, 
(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest)request); + case 29: + return impl.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest)request); + case 30: + return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest)request); + case 31: + return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest)request); + case 32: + return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest)request); + case 33: + return impl.getTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -21028,6 +26808,18 @@ public final class MasterAdminProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest.getDefaultInstance(); case 27: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance(); + case 28: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest.getDefaultInstance(); + case 29: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest.getDefaultInstance(); + case 30: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest.getDefaultInstance(); + case 31: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); + case 32: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); + case 33: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -21098,6 +26890,18 @@ public final class MasterAdminProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance(); case 27: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(); + case 28: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse.getDefaultInstance(); + case 29: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse.getDefaultInstance(); + case 30: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse.getDefaultInstance(); + case 31: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); + case 32: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); + case 33: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -21246,6 +27050,36 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, com.google.protobuf.RpcCallback done); + public abstract void modifyNamespace( + com.google.protobuf.RpcController 
controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void createNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void deleteNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void getNamespaceDescriptor( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void listNamespaceDescriptors( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest request, + com.google.protobuf.RpcCallback done); + + public abstract void getTableDescriptorsByNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -21408,6 +27242,36 @@ public final class MasterAdminProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 28: + this.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 29: + this.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 30: + this.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 31: + this.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 32: + this.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 33: + this.getTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -21478,6 +27342,18 @@ public final class MasterAdminProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest.getDefaultInstance(); case 27: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance(); + case 28: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest.getDefaultInstance(); + case 29: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest.getDefaultInstance(); + case 30: + return 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest.getDefaultInstance(); + case 31: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); + case 32: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); + case 33: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -21548,6 +27424,18 @@ public final class MasterAdminProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance(); case 27: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(); + case 28: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse.getDefaultInstance(); + case 29: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse.getDefaultInstance(); + case 30: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse.getDefaultInstance(); + case 31: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); + case 32: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); + case 33: + return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -21988,6 +27876,96 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance())); } + + public void modifyNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(28), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse.getDefaultInstance())); + } + + public void createNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(29), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse.getDefaultInstance())); + } + + public void deleteNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest request, + 
com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(30), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse.getDefaultInstance())); + } + + public void getNamespaceDescriptor( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(31), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse.getDefaultInstance())); + } + + public void listNamespaceDescriptors( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(32), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse.getDefaultInstance())); + } + + public void getTableDescriptorsByNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(33), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -22135,6 +28113,36 @@ public final class MasterAdminProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse modifyNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse createNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest request) 
+ throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse deleteNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse getTableDescriptorsByNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -22479,6 +28487,78 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse modifyNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(28), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse createNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(29), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse deleteNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(30), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(31), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(32), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse getTableDescriptorsByNamespace( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(33), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.getDefaultInstance()); + } + } } @@ -22613,6 +28693,66 @@ public final class MasterAdminProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_ModifyTableResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_CreateNamespaceRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_CreateNamespaceRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_CreateNamespaceResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_CreateNamespaceResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_DeleteNamespaceRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_DeleteNamespaceRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_DeleteNamespaceResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_DeleteNamespaceResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ModifyNamespaceRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ModifyNamespaceRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetNamespaceDescriptorRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetNamespaceDescriptorRequest_fieldAccessorTable; + private 
static com.google.protobuf.Descriptors.Descriptor + internal_static_GetNamespaceDescriptorResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetNamespaceDescriptorResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ModifyNamespaceResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ModifyNamespaceResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ListNamespaceDescriptorsRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ListNamespaceDescriptorsRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ListNamespaceDescriptorsResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ListNamespaceDescriptorsResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetTableDescriptorsByNamespaceRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetTableDescriptorsByNamespaceRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetTableDescriptorsByNamespaceResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetTableDescriptorsByNamespaceResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_ShutdownRequest_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -22752,116 +28892,148 @@ public final class MasterAdminProtos { static { java.lang.String[] descriptorData = { "\n\021MasterAdmin.proto\032\014Master.proto\032\013hbase" + - ".proto\032\014Client.proto\"T\n\020AddColumnRequest" + - "\022\022\n\ntable_name\030\001 \002(\014\022,\n\017column_families\030" + - "\002 \002(\0132\023.ColumnFamilySchema\"\023\n\021AddColumnR" + - "esponse\">\n\023DeleteColumnRequest\022\022\n\ntable_" + - "name\030\001 \002(\014\022\023\n\013column_name\030\002 \002(\014\"\026\n\024Delet" + - "eColumnResponse\"W\n\023ModifyColumnRequest\022\022" + - "\n\ntable_name\030\001 \002(\014\022,\n\017column_families\030\002 " + - "\002(\0132\023.ColumnFamilySchema\"\026\n\024ModifyColumn" + - "Response\"\\\n\021MoveRegionRequest\022 \n\006region\030", - "\001 \002(\0132\020.RegionSpecifier\022%\n\020dest_server_n" + - "ame\030\002 \001(\0132\013.ServerName\"\024\n\022MoveRegionResp" + - "onse\"\200\001\n\035DispatchMergingRegionsRequest\022\"" + - "\n\010region_a\030\001 \002(\0132\020.RegionSpecifier\022\"\n\010re" + - "gion_b\030\002 \002(\0132\020.RegionSpecifier\022\027\n\010forcib" + - "le\030\003 \001(\010:\005false\" \n\036DispatchMergingRegion" + - "sResponse\"7\n\023AssignRegionRequest\022 \n\006regi" + - "on\030\001 \002(\0132\020.RegionSpecifier\"\026\n\024AssignRegi" + - "onResponse\"O\n\025UnassignRegionRequest\022 \n\006r" + - "egion\030\001 \002(\0132\020.RegionSpecifier\022\024\n\005force\030\002", - " \001(\010:\005false\"\030\n\026UnassignRegionResponse\"8\n" + - "\024OfflineRegionRequest\022 \n\006region\030\001 \002(\0132\020." 
+ - "RegionSpecifier\"\027\n\025OfflineRegionResponse" + - "\"L\n\022CreateTableRequest\022\"\n\014table_schema\030\001" + - " \002(\0132\014.TableSchema\022\022\n\nsplit_keys\030\002 \003(\014\"\025" + - "\n\023CreateTableResponse\"(\n\022DeleteTableRequ" + - "est\022\022\n\ntable_name\030\001 \002(\014\"\025\n\023DeleteTableRe" + - "sponse\"(\n\022EnableTableRequest\022\022\n\ntable_na" + - "me\030\001 \002(\014\"\025\n\023EnableTableResponse\")\n\023Disab" + - "leTableRequest\022\022\n\ntable_name\030\001 \002(\014\"\026\n\024Di", - "sableTableResponse\"L\n\022ModifyTableRequest" + - "\022\022\n\ntable_name\030\001 \002(\014\022\"\n\014table_schema\030\002 \002" + - "(\0132\014.TableSchema\"\025\n\023ModifyTableResponse\"" + - "\021\n\017ShutdownRequest\"\022\n\020ShutdownResponse\"\023" + - "\n\021StopMasterRequest\"\024\n\022StopMasterRespons" + - "e\"\020\n\016BalanceRequest\"\'\n\017BalanceResponse\022\024" + - "\n\014balancer_ran\030\001 \002(\010\"<\n\031SetBalancerRunni" + - "ngRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001" + - "(\010\"8\n\032SetBalancerRunningResponse\022\032\n\022prev" + - "_balance_value\030\001 \001(\010\"\024\n\022CatalogScanReque", - "st\"*\n\023CatalogScanResponse\022\023\n\013scan_result" + - "\030\001 \001(\005\"-\n\033EnableCatalogJanitorRequest\022\016\n" + - "\006enable\030\001 \002(\010\"2\n\034EnableCatalogJanitorRes" + - "ponse\022\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJa" + - "nitorEnabledRequest\"0\n\037IsCatalogJanitorE" + - "nabledResponse\022\r\n\005value\030\001 \002(\010\"=\n\023TakeSna" + - "pshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.Snapsho" + - "tDescription\"0\n\024TakeSnapshotResponse\022\030\n\020" + - "expected_timeout\030\001 \002(\003\"\025\n\023ListSnapshotRe" + - "quest\"?\n\024ListSnapshotResponse\022\'\n\tsnapsho", - "ts\030\001 \003(\0132\024.SnapshotDescription\"?\n\025Delete" + - "SnapshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.Snap" + - "shotDescription\"\030\n\026DeleteSnapshotRespons" + - "e\"@\n\026RestoreSnapshotRequest\022&\n\010snapshot\030" + - "\001 \002(\0132\024.SnapshotDescription\"\031\n\027RestoreSn" + - "apshotResponse\"?\n\025IsSnapshotDoneRequest\022" + - "&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescription\"" + - "U\n\026IsSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:" + - "\005false\022&\n\010snapshot\030\002 \001(\0132\024.SnapshotDescr" + - "iption\"F\n\034IsRestoreSnapshotDoneRequest\022&", - "\n\010snapshot\030\001 \001(\0132\024.SnapshotDescription\"3" + - "\n\035IsRestoreSnapshotDoneResponse\022\022\n\004done\030" + - "\001 \001(\010:\004true2\305\016\n\022MasterAdminService\0222\n\tAd" + - "dColumn\022\021.AddColumnRequest\032\022.AddColumnRe" + - "sponse\022;\n\014DeleteColumn\022\024.DeleteColumnReq" + - "uest\032\025.DeleteColumnResponse\022;\n\014ModifyCol" + - "umn\022\024.ModifyColumnRequest\032\025.ModifyColumn" + - "Response\0225\n\nMoveRegion\022\022.MoveRegionReque" + - "st\032\023.MoveRegionResponse\022Y\n\026DispatchMergi" + - "ngRegions\022\036.DispatchMergingRegionsReques", - "t\032\037.DispatchMergingRegionsResponse\022;\n\014As" + - "signRegion\022\024.AssignRegionRequest\032\025.Assig" + - "nRegionResponse\022A\n\016UnassignRegion\022\026.Unas" + - "signRegionRequest\032\027.UnassignRegionRespon" + - "se\022>\n\rOfflineRegion\022\025.OfflineRegionReque" + - "st\032\026.OfflineRegionResponse\0228\n\013DeleteTabl" + - "e\022\023.DeleteTableRequest\032\024.DeleteTableResp" + - 
"onse\0228\n\013EnableTable\022\023.EnableTableRequest" + - "\032\024.EnableTableResponse\022;\n\014DisableTable\022\024" + - ".DisableTableRequest\032\025.DisableTableRespo", - "nse\0228\n\013ModifyTable\022\023.ModifyTableRequest\032" + - "\024.ModifyTableResponse\0228\n\013CreateTable\022\023.C" + - "reateTableRequest\032\024.CreateTableResponse\022" + - "/\n\010Shutdown\022\020.ShutdownRequest\032\021.Shutdown" + - "Response\0225\n\nStopMaster\022\022.StopMasterReque" + - "st\032\023.StopMasterResponse\022,\n\007Balance\022\017.Bal" + - "anceRequest\032\020.BalanceResponse\022M\n\022SetBala" + - "ncerRunning\022\032.SetBalancerRunningRequest\032" + - "\033.SetBalancerRunningResponse\022;\n\016RunCatal" + - "ogScan\022\023.CatalogScanRequest\032\024.CatalogSca", - "nResponse\022S\n\024EnableCatalogJanitor\022\034.Enab" + - "leCatalogJanitorRequest\032\035.EnableCatalogJ" + - "anitorResponse\022\\\n\027IsCatalogJanitorEnable" + - "d\022\037.IsCatalogJanitorEnabledRequest\032 .IsC" + - "atalogJanitorEnabledResponse\022L\n\021ExecMast" + - "erService\022\032.CoprocessorServiceRequest\032\033." + - "CoprocessorServiceResponse\0227\n\010Snapshot\022\024" + - ".TakeSnapshotRequest\032\025.TakeSnapshotRespo" + - "nse\022D\n\025GetCompletedSnapshots\022\024.ListSnaps" + - "hotRequest\032\025.ListSnapshotResponse\022A\n\016Del", - "eteSnapshot\022\026.DeleteSnapshotRequest\032\027.De" + - "leteSnapshotResponse\022A\n\016IsSnapshotDone\022\026" + - ".IsSnapshotDoneRequest\032\027.IsSnapshotDoneR" + - "esponse\022D\n\017RestoreSnapshot\022\027.RestoreSnap" + - "shotRequest\032\030.RestoreSnapshotResponse\022V\n" + - "\025IsRestoreSnapshotDone\022\035.IsRestoreSnapsh" + - "otDoneRequest\032\036.IsRestoreSnapshotDoneRes" + - "ponse\022D\n\017IsMasterRunning\022\027.IsMasterRunni" + - "ngRequest\032\030.IsMasterRunningResponseBG\n*o" + - "rg.apache.hadoop.hbase.protobuf.generate", - "dB\021MasterAdminProtosH\001\210\001\001\240\001\001" + ".proto\032\014Client.proto\"`\n\020AddColumnRequest" + + "\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\022,\n\017colu" + + "mn_families\030\002 \002(\0132\023.ColumnFamilySchema\"\023" + + "\n\021AddColumnResponse\"J\n\023DeleteColumnReque" + + "st\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\022\023\n\013co" + + "lumn_name\030\002 \002(\014\"\026\n\024DeleteColumnResponse\"" + + "c\n\023ModifyColumnRequest\022\036\n\ntable_name\030\001 \002" + + "(\0132\n.TableName\022,\n\017column_families\030\002 \002(\0132" + + "\023.ColumnFamilySchema\"\026\n\024ModifyColumnResp", + "onse\"\\\n\021MoveRegionRequest\022 \n\006region\030\001 \002(" + + "\0132\020.RegionSpecifier\022%\n\020dest_server_name\030" + + "\002 \001(\0132\013.ServerName\"\024\n\022MoveRegionResponse" + + "\"\200\001\n\035DispatchMergingRegionsRequest\022\"\n\010re" + + "gion_a\030\001 \002(\0132\020.RegionSpecifier\022\"\n\010region" + + "_b\030\002 \002(\0132\020.RegionSpecifier\022\027\n\010forcible\030\003" + + " \001(\010:\005false\" \n\036DispatchMergingRegionsRes" + + "ponse\"7\n\023AssignRegionRequest\022 \n\006region\030\001" + + " \002(\0132\020.RegionSpecifier\"\026\n\024AssignRegionRe" + + "sponse\"O\n\025UnassignRegionRequest\022 \n\006regio", + "n\030\001 \002(\0132\020.RegionSpecifier\022\024\n\005force\030\002 \001(\010" + + ":\005false\"\030\n\026UnassignRegionResponse\"8\n\024Off" + + "lineRegionRequest\022 \n\006region\030\001 \002(\0132\020.Regi" + + "onSpecifier\"\027\n\025OfflineRegionResponse\"L\n\022" + + "CreateTableRequest\022\"\n\014table_schema\030\001 
\002(\013" + + "2\014.TableSchema\022\022\n\nsplit_keys\030\002 \003(\014\"\025\n\023Cr" + + "eateTableResponse\"4\n\022DeleteTableRequest\022" + + "\036\n\ntable_name\030\001 \002(\0132\n.TableName\"\025\n\023Delet" + + "eTableResponse\"4\n\022EnableTableRequest\022\036\n\n" + + "table_name\030\001 \002(\0132\n.TableName\"\025\n\023EnableTa", + "bleResponse\"5\n\023DisableTableRequest\022\036\n\nta" + + "ble_name\030\001 \002(\0132\n.TableName\"\026\n\024DisableTab" + + "leResponse\"X\n\022ModifyTableRequest\022\036\n\ntabl" + + "e_name\030\001 \002(\0132\n.TableName\022\"\n\014table_schema" + + "\030\002 \002(\0132\014.TableSchema\"\025\n\023ModifyTableRespo" + + "nse\"K\n\026CreateNamespaceRequest\0221\n\023namespa" + + "ceDescriptor\030\001 \002(\0132\024.NamespaceDescriptor" + + "\"\031\n\027CreateNamespaceResponse\"/\n\026DeleteNam" + + "espaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"\031\n\027" + + "DeleteNamespaceResponse\"K\n\026ModifyNamespa", + "ceRequest\0221\n\023namespaceDescriptor\030\001 \002(\0132\024" + + ".NamespaceDescriptor\"6\n\035GetNamespaceDesc" + + "riptorRequest\022\025\n\rnamespaceName\030\001 \002(\t\"S\n\036" + + "GetNamespaceDescriptorResponse\0221\n\023namesp" + + "aceDescriptor\030\001 \002(\0132\024.NamespaceDescripto" + + "r\"\031\n\027ModifyNamespaceResponse\"!\n\037ListName" + + "spaceDescriptorsRequest\"U\n ListNamespace" + + "DescriptorsResponse\0221\n\023namespaceDescript" + + "or\030\001 \003(\0132\024.NamespaceDescriptor\">\n%GetTab" + + "leDescriptorsByNamespaceRequest\022\025\n\rnames", + "paceName\030\001 \002(\t\"K\n&GetTableDescriptorsByN" + + "amespaceResponse\022!\n\013tableSchema\030\001 \003(\0132\014." + + "TableSchema\"\021\n\017ShutdownRequest\"\022\n\020Shutdo" + + "wnResponse\"\023\n\021StopMasterRequest\"\024\n\022StopM" + + "asterResponse\"\020\n\016BalanceRequest\"\'\n\017Balan" + + "ceResponse\022\024\n\014balancer_ran\030\001 \002(\010\"<\n\031SetB" + + "alancerRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013syn" + + "chronous\030\002 \001(\010\"8\n\032SetBalancerRunningResp" + + "onse\022\032\n\022prev_balance_value\030\001 \001(\010\"\024\n\022Cata" + + "logScanRequest\"*\n\023CatalogScanResponse\022\023\n", + "\013scan_result\030\001 \001(\005\"-\n\033EnableCatalogJanit" + + "orRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCatal" + + "ogJanitorResponse\022\022\n\nprev_value\030\001 \001(\010\" \n" + + "\036IsCatalogJanitorEnabledRequest\"0\n\037IsCat" + + "alogJanitorEnabledResponse\022\r\n\005value\030\001 \002(" + + "\010\"=\n\023TakeSnapshotRequest\022&\n\010snapshot\030\001 \002" + + "(\0132\024.SnapshotDescription\"0\n\024TakeSnapshot" + + "Response\022\030\n\020expected_timeout\030\001 \002(\003\"\025\n\023Li" + + "stSnapshotRequest\"?\n\024ListSnapshotRespons" + + "e\022\'\n\tsnapshots\030\001 \003(\0132\024.SnapshotDescripti", + "on\"?\n\025DeleteSnapshotRequest\022&\n\010snapshot\030" + + "\001 \002(\0132\024.SnapshotDescription\"\030\n\026DeleteSna" + + "pshotResponse\"@\n\026RestoreSnapshotRequest\022" + + "&\n\010snapshot\030\001 \002(\0132\024.SnapshotDescription\"" + + "\031\n\027RestoreSnapshotResponse\"?\n\025IsSnapshot" + + "DoneRequest\022&\n\010snapshot\030\001 \001(\0132\024.Snapshot" + + "Description\"U\n\026IsSnapshotDoneResponse\022\023\n" + + "\004done\030\001 \001(\010:\005false\022&\n\010snapshot\030\002 \001(\0132\024.S" + + "napshotDescription\"F\n\034IsRestoreSnapshotD" + + "oneRequest\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotD", + 
"escription\"3\n\035IsRestoreSnapshotDoneRespo" + + "nse\022\022\n\004done\030\001 \001(\010:\004true2\306\022\n\022MasterAdminS" + + "ervice\0222\n\tAddColumn\022\021.AddColumnRequest\032\022" + + ".AddColumnResponse\022;\n\014DeleteColumn\022\024.Del" + + "eteColumnRequest\032\025.DeleteColumnResponse\022" + + ";\n\014ModifyColumn\022\024.ModifyColumnRequest\032\025." + + "ModifyColumnResponse\0225\n\nMoveRegion\022\022.Mov" + + "eRegionRequest\032\023.MoveRegionResponse\022Y\n\026D" + + "ispatchMergingRegions\022\036.DispatchMergingR" + + "egionsRequest\032\037.DispatchMergingRegionsRe", + "sponse\022;\n\014AssignRegion\022\024.AssignRegionReq" + + "uest\032\025.AssignRegionResponse\022A\n\016UnassignR" + + "egion\022\026.UnassignRegionRequest\032\027.Unassign" + + "RegionResponse\022>\n\rOfflineRegion\022\025.Offlin" + + "eRegionRequest\032\026.OfflineRegionResponse\0228" + + "\n\013DeleteTable\022\023.DeleteTableRequest\032\024.Del" + + "eteTableResponse\0228\n\013EnableTable\022\023.Enable" + + "TableRequest\032\024.EnableTableResponse\022;\n\014Di" + + "sableTable\022\024.DisableTableRequest\032\025.Disab" + + "leTableResponse\0228\n\013ModifyTable\022\023.ModifyT", + "ableRequest\032\024.ModifyTableResponse\0228\n\013Cre" + + "ateTable\022\023.CreateTableRequest\032\024.CreateTa" + + "bleResponse\022/\n\010Shutdown\022\020.ShutdownReques" + + "t\032\021.ShutdownResponse\0225\n\nStopMaster\022\022.Sto" + + "pMasterRequest\032\023.StopMasterResponse\022,\n\007B" + + "alance\022\017.BalanceRequest\032\020.BalanceRespons" + + "e\022M\n\022SetBalancerRunning\022\032.SetBalancerRun" + + "ningRequest\032\033.SetBalancerRunningResponse" + + "\022;\n\016RunCatalogScan\022\023.CatalogScanRequest\032" + + "\024.CatalogScanResponse\022S\n\024EnableCatalogJa", + "nitor\022\034.EnableCatalogJanitorRequest\032\035.En" + + "ableCatalogJanitorResponse\022\\\n\027IsCatalogJ" + + "anitorEnabled\022\037.IsCatalogJanitorEnabledR" + + "equest\032 .IsCatalogJanitorEnabledResponse" + + "\022L\n\021ExecMasterService\022\032.CoprocessorServi" + + "ceRequest\032\033.CoprocessorServiceResponse\0227" + + "\n\010Snapshot\022\024.TakeSnapshotRequest\032\025.TakeS" + + "napshotResponse\022D\n\025GetCompletedSnapshots" + + "\022\024.ListSnapshotRequest\032\025.ListSnapshotRes" + + "ponse\022A\n\016DeleteSnapshot\022\026.DeleteSnapshot", + "Request\032\027.DeleteSnapshotResponse\022A\n\016IsSn" + + "apshotDone\022\026.IsSnapshotDoneRequest\032\027.IsS" + + "napshotDoneResponse\022D\n\017RestoreSnapshot\022\027" + + ".RestoreSnapshotRequest\032\030.RestoreSnapsho" + + "tResponse\022V\n\025IsRestoreSnapshotDone\022\035.IsR" + + "estoreSnapshotDoneRequest\032\036.IsRestoreSna" + + "pshotDoneResponse\022D\n\017IsMasterRunning\022\027.I" + + "sMasterRunningRequest\032\030.IsMasterRunningR" + + "esponse\022D\n\017ModifyNamespace\022\027.ModifyNames" + + "paceRequest\032\030.ModifyNamespaceResponse\022D\n", + "\017CreateNamespace\022\027.CreateNamespaceReques" + + "t\032\030.CreateNamespaceResponse\022D\n\017DeleteNam" + + "espace\022\027.DeleteNamespaceRequest\032\030.Delete" + + "NamespaceResponse\022Y\n\026GetNamespaceDescrip" + + "tor\022\036.GetNamespaceDescriptorRequest\032\037.Ge" + + "tNamespaceDescriptorResponse\022_\n\030ListName" + + "spaceDescriptors\022 .ListNamespaceDescript" + + "orsRequest\032!.ListNamespaceDescriptorsRes" + + "ponse\022q\n\036GetTableDescriptorsByNamespace\022" + + "&.GetTableDescriptorsByNamespaceRequest\032", + "\'.GetTableDescriptorsByNamespaceResponse" + + 
"BG\n*org.apache.hadoop.hbase.protobuf.gen" + + "eratedB\021MasterAdminProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -23076,8 +29248,104 @@ public final class MasterAdminProtos { new java.lang.String[] { }, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.Builder.class); - internal_static_ShutdownRequest_descriptor = + internal_static_CreateNamespaceRequest_descriptor = getDescriptor().getMessageTypes().get(26); + internal_static_CreateNamespaceRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_CreateNamespaceRequest_descriptor, + new java.lang.String[] { "NamespaceDescriptor", }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest.Builder.class); + internal_static_CreateNamespaceResponse_descriptor = + getDescriptor().getMessageTypes().get(27); + internal_static_CreateNamespaceResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_CreateNamespaceResponse_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse.Builder.class); + internal_static_DeleteNamespaceRequest_descriptor = + getDescriptor().getMessageTypes().get(28); + internal_static_DeleteNamespaceRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_DeleteNamespaceRequest_descriptor, + new java.lang.String[] { "NamespaceName", }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest.Builder.class); + internal_static_DeleteNamespaceResponse_descriptor = + getDescriptor().getMessageTypes().get(29); + internal_static_DeleteNamespaceResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_DeleteNamespaceResponse_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse.Builder.class); + internal_static_ModifyNamespaceRequest_descriptor = + getDescriptor().getMessageTypes().get(30); + internal_static_ModifyNamespaceRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ModifyNamespaceRequest_descriptor, + new java.lang.String[] { "NamespaceDescriptor", }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest.Builder.class); + internal_static_GetNamespaceDescriptorRequest_descriptor = + getDescriptor().getMessageTypes().get(31); + internal_static_GetNamespaceDescriptorRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetNamespaceDescriptorRequest_descriptor, + new java.lang.String[] { "NamespaceName", }, + 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest.Builder.class); + internal_static_GetNamespaceDescriptorResponse_descriptor = + getDescriptor().getMessageTypes().get(32); + internal_static_GetNamespaceDescriptorResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetNamespaceDescriptorResponse_descriptor, + new java.lang.String[] { "NamespaceDescriptor", }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse.Builder.class); + internal_static_ModifyNamespaceResponse_descriptor = + getDescriptor().getMessageTypes().get(33); + internal_static_ModifyNamespaceResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ModifyNamespaceResponse_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse.Builder.class); + internal_static_ListNamespaceDescriptorsRequest_descriptor = + getDescriptor().getMessageTypes().get(34); + internal_static_ListNamespaceDescriptorsRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ListNamespaceDescriptorsRequest_descriptor, + new java.lang.String[] { }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest.Builder.class); + internal_static_ListNamespaceDescriptorsResponse_descriptor = + getDescriptor().getMessageTypes().get(35); + internal_static_ListNamespaceDescriptorsResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ListNamespaceDescriptorsResponse_descriptor, + new java.lang.String[] { "NamespaceDescriptor", }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse.Builder.class); + internal_static_GetTableDescriptorsByNamespaceRequest_descriptor = + getDescriptor().getMessageTypes().get(36); + internal_static_GetTableDescriptorsByNamespaceRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetTableDescriptorsByNamespaceRequest_descriptor, + new java.lang.String[] { "NamespaceName", }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest.class, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceRequest.Builder.class); + internal_static_GetTableDescriptorsByNamespaceResponse_descriptor = + getDescriptor().getMessageTypes().get(37); + internal_static_GetTableDescriptorsByNamespaceResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetTableDescriptorsByNamespaceResponse_descriptor, + new java.lang.String[] { "TableSchema", }, + org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.class, + 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.Builder.class); + internal_static_ShutdownRequest_descriptor = + getDescriptor().getMessageTypes().get(38); internal_static_ShutdownRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ShutdownRequest_descriptor, @@ -23085,7 +29353,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest.Builder.class); internal_static_ShutdownResponse_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(39); internal_static_ShutdownResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ShutdownResponse_descriptor, @@ -23093,7 +29361,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse.Builder.class); internal_static_StopMasterRequest_descriptor = - getDescriptor().getMessageTypes().get(28); + getDescriptor().getMessageTypes().get(40); internal_static_StopMasterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StopMasterRequest_descriptor, @@ -23101,7 +29369,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest.Builder.class); internal_static_StopMasterResponse_descriptor = - getDescriptor().getMessageTypes().get(29); + getDescriptor().getMessageTypes().get(41); internal_static_StopMasterResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StopMasterResponse_descriptor, @@ -23109,7 +29377,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse.Builder.class); internal_static_BalanceRequest_descriptor = - getDescriptor().getMessageTypes().get(30); + getDescriptor().getMessageTypes().get(42); internal_static_BalanceRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BalanceRequest_descriptor, @@ -23117,7 +29385,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest.Builder.class); internal_static_BalanceResponse_descriptor = - getDescriptor().getMessageTypes().get(31); + getDescriptor().getMessageTypes().get(43); internal_static_BalanceResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BalanceResponse_descriptor, @@ -23125,7 +29393,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.Builder.class); internal_static_SetBalancerRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(32); + getDescriptor().getMessageTypes().get(44); internal_static_SetBalancerRunningRequest_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SetBalancerRunningRequest_descriptor, @@ -23133,7 +29401,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest.Builder.class); internal_static_SetBalancerRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(33); + getDescriptor().getMessageTypes().get(45); internal_static_SetBalancerRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SetBalancerRunningResponse_descriptor, @@ -23141,7 +29409,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.Builder.class); internal_static_CatalogScanRequest_descriptor = - getDescriptor().getMessageTypes().get(34); + getDescriptor().getMessageTypes().get(46); internal_static_CatalogScanRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CatalogScanRequest_descriptor, @@ -23149,7 +29417,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.Builder.class); internal_static_CatalogScanResponse_descriptor = - getDescriptor().getMessageTypes().get(35); + getDescriptor().getMessageTypes().get(47); internal_static_CatalogScanResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CatalogScanResponse_descriptor, @@ -23157,7 +29425,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.Builder.class); internal_static_EnableCatalogJanitorRequest_descriptor = - getDescriptor().getMessageTypes().get(36); + getDescriptor().getMessageTypes().get(48); internal_static_EnableCatalogJanitorRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EnableCatalogJanitorRequest_descriptor, @@ -23165,7 +29433,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.Builder.class); internal_static_EnableCatalogJanitorResponse_descriptor = - getDescriptor().getMessageTypes().get(37); + getDescriptor().getMessageTypes().get(49); internal_static_EnableCatalogJanitorResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EnableCatalogJanitorResponse_descriptor, @@ -23173,7 +29441,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.Builder.class); internal_static_IsCatalogJanitorEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(38); + getDescriptor().getMessageTypes().get(50); internal_static_IsCatalogJanitorEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( 
internal_static_IsCatalogJanitorEnabledRequest_descriptor, @@ -23181,7 +29449,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.Builder.class); internal_static_IsCatalogJanitorEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(39); + getDescriptor().getMessageTypes().get(51); internal_static_IsCatalogJanitorEnabledResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsCatalogJanitorEnabledResponse_descriptor, @@ -23189,7 +29457,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.Builder.class); internal_static_TakeSnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(40); + getDescriptor().getMessageTypes().get(52); internal_static_TakeSnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TakeSnapshotRequest_descriptor, @@ -23197,7 +29465,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest.Builder.class); internal_static_TakeSnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(41); + getDescriptor().getMessageTypes().get(53); internal_static_TakeSnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TakeSnapshotResponse_descriptor, @@ -23205,7 +29473,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.Builder.class); internal_static_ListSnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(42); + getDescriptor().getMessageTypes().get(54); internal_static_ListSnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ListSnapshotRequest_descriptor, @@ -23213,7 +29481,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest.Builder.class); internal_static_ListSnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(43); + getDescriptor().getMessageTypes().get(55); internal_static_ListSnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ListSnapshotResponse_descriptor, @@ -23221,7 +29489,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.Builder.class); internal_static_DeleteSnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(44); + getDescriptor().getMessageTypes().get(56); internal_static_DeleteSnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DeleteSnapshotRequest_descriptor, @@ -23229,7 +29497,7 @@ public final class MasterAdminProtos { 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest.Builder.class); internal_static_DeleteSnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(45); + getDescriptor().getMessageTypes().get(57); internal_static_DeleteSnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DeleteSnapshotResponse_descriptor, @@ -23237,7 +29505,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.Builder.class); internal_static_RestoreSnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(46); + getDescriptor().getMessageTypes().get(58); internal_static_RestoreSnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RestoreSnapshotRequest_descriptor, @@ -23245,7 +29513,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest.Builder.class); internal_static_RestoreSnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(47); + getDescriptor().getMessageTypes().get(59); internal_static_RestoreSnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RestoreSnapshotResponse_descriptor, @@ -23253,7 +29521,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse.Builder.class); internal_static_IsSnapshotDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(48); + getDescriptor().getMessageTypes().get(60); internal_static_IsSnapshotDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsSnapshotDoneRequest_descriptor, @@ -23261,7 +29529,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest.Builder.class); internal_static_IsSnapshotDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(49); + getDescriptor().getMessageTypes().get(61); internal_static_IsSnapshotDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsSnapshotDoneResponse_descriptor, @@ -23269,7 +29537,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.Builder.class); internal_static_IsRestoreSnapshotDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(50); + getDescriptor().getMessageTypes().get(62); internal_static_IsRestoreSnapshotDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsRestoreSnapshotDoneRequest_descriptor, @@ -23277,7 +29545,7 @@ public final class MasterAdminProtos { org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest.Builder.class); internal_static_IsRestoreSnapshotDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(51); + getDescriptor().getMessageTypes().get(63); internal_static_IsRestoreSnapshotDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsRestoreSnapshotDoneResponse_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java index af2b975..c152354 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java @@ -11,9 +11,10 @@ public final class MasterMonitorProtos { public interface GetSchemaAlterStatusRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bytes table_name = 1; + // required .TableName table_name = 1; boolean hasTableName(); - com.google.protobuf.ByteString getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); } public static final class GetSchemaAlterStatusRequest extends com.google.protobuf.GeneratedMessage @@ -44,18 +45,21 @@ public final class MasterMonitorProtos { } private int bitField0_; - // required bytes table_name = 1; + // required .TableName table_name = 1; public static final int TABLE_NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString tableName_; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { return tableName_; } private void initFields() { - tableName_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -66,6 +70,10 @@ public final class MasterMonitorProtos { memoizedIsInitialized = 0; return false; } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -74,7 +82,7 @@ public final class MasterMonitorProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, tableName_); + output.writeMessage(1, tableName_); } getUnknownFields().writeTo(output); } @@ -87,7 +95,7 @@ public final class MasterMonitorProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, tableName_); + .computeMessageSize(1, tableName_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -238,6 +246,7 @@ public final class MasterMonitorProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); } } private static Builder 
create() { @@ -246,7 +255,11 @@ public final class MasterMonitorProtos { public Builder clear() { super.clear(); - tableName_ = com.google.protobuf.ByteString.EMPTY; + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -289,7 +302,11 @@ public final class MasterMonitorProtos { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.tableName_ = tableName_; + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -307,7 +324,7 @@ public final class MasterMonitorProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest.getDefaultInstance()) return this; if (other.hasTableName()) { - setTableName(other.getTableName()); + mergeTableName(other.getTableName()); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -318,6 +335,10 @@ public final class MasterMonitorProtos { return false; } + if (!getTableName().isInitialized()) { + + return false; + } return true; } @@ -345,8 +366,12 @@ public final class MasterMonitorProtos { break; } case 10: { - bitField0_ |= 0x00000001; - tableName_ = input.readBytes(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + if (hasTableName()) { + subBuilder.mergeFrom(getTableName()); + } + input.readMessage(subBuilder, extensionRegistry); + setTableName(subBuilder.buildPartial()); break; } } @@ -355,29 +380,95 @@ public final class MasterMonitorProtos { private int bitField0_; - // required bytes table_name = 1; - private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + // required .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { - return tableName_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } } - public Builder setTableName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - tableName_ = value; - onChanged(); + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder 
setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; return this; } public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - tableName_ = getDefaultInstance().getTableName(); - onChanged(); return this; } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } // @@protoc_insertion_point(builder_scope:GetSchemaAlterStatusRequest) } @@ -830,10 +921,15 @@ public final class MasterMonitorProtos { public interface GetTableDescriptorsRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // repeated string table_names = 1; - java.util.List getTableNamesList(); + // repeated .TableName table_names = 1; + java.util.List + getTableNamesList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableNames(int index); int getTableNamesCount(); - String getTableNames(int index); + java.util.List + getTableNamesOrBuilderList(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNamesOrBuilder( + int index); } public static final class GetTableDescriptorsRequest extends com.google.protobuf.GeneratedMessage @@ -863,28 +959,41 @@ public final class MasterMonitorProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.internal_static_GetTableDescriptorsRequest_fieldAccessorTable; } - // repeated string table_names = 1; + // repeated .TableName table_names = 1; public static final int 
TABLE_NAMES_FIELD_NUMBER = 1; - private com.google.protobuf.LazyStringList tableNames_; - public java.util.List - getTableNamesList() { + private java.util.List tableNames_; + public java.util.List getTableNamesList() { + return tableNames_; + } + public java.util.List + getTableNamesOrBuilderList() { return tableNames_; } public int getTableNamesCount() { return tableNames_.size(); } - public String getTableNames(int index) { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableNames(int index) { + return tableNames_.get(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNamesOrBuilder( + int index) { return tableNames_.get(index); } private void initFields() { - tableNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; + tableNames_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + for (int i = 0; i < getTableNamesCount(); i++) { + if (!getTableNames(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -893,7 +1002,7 @@ public final class MasterMonitorProtos { throws java.io.IOException { getSerializedSize(); for (int i = 0; i < tableNames_.size(); i++) { - output.writeBytes(1, tableNames_.getByteString(i)); + output.writeMessage(1, tableNames_.get(i)); } getUnknownFields().writeTo(output); } @@ -904,14 +1013,9 @@ public final class MasterMonitorProtos { if (size != -1) return size; size = 0; - { - int dataSize = 0; - for (int i = 0; i < tableNames_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(tableNames_.getByteString(i)); - } - size += dataSize; - size += 1 * getTableNamesList().size(); + for (int i = 0; i < tableNames_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableNames_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -1059,6 +1163,7 @@ public final class MasterMonitorProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNamesFieldBuilder(); } } private static Builder create() { @@ -1067,8 +1172,12 @@ public final class MasterMonitorProtos { public Builder clear() { super.clear(); - tableNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); + if (tableNamesBuilder_ == null) { + tableNames_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + tableNamesBuilder_.clear(); + } return this; } @@ -1106,12 +1215,15 @@ public final class MasterMonitorProtos { public org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest buildPartial() { org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest(this); int from_bitField0_ = bitField0_; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - tableNames_ = new com.google.protobuf.UnmodifiableLazyStringList( - tableNames_); - bitField0_ = (bitField0_ & ~0x00000001); + if (tableNamesBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + tableNames_ = java.util.Collections.unmodifiableList(tableNames_); + bitField0_ = (bitField0_ & ~0x00000001); + } + 
result.tableNames_ = tableNames_; + } else { + result.tableNames_ = tableNamesBuilder_.build(); } - result.tableNames_ = tableNames_; onBuilt(); return result; } @@ -1127,21 +1239,43 @@ public final class MasterMonitorProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest.getDefaultInstance()) return this; - if (!other.tableNames_.isEmpty()) { - if (tableNames_.isEmpty()) { - tableNames_ = other.tableNames_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureTableNamesIsMutable(); - tableNames_.addAll(other.tableNames_); + if (tableNamesBuilder_ == null) { + if (!other.tableNames_.isEmpty()) { + if (tableNames_.isEmpty()) { + tableNames_ = other.tableNames_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTableNamesIsMutable(); + tableNames_.addAll(other.tableNames_); + } + onChanged(); + } + } else { + if (!other.tableNames_.isEmpty()) { + if (tableNamesBuilder_.isEmpty()) { + tableNamesBuilder_.dispose(); + tableNamesBuilder_ = null; + tableNames_ = other.tableNames_; + bitField0_ = (bitField0_ & ~0x00000001); + tableNamesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTableNamesFieldBuilder() : null; + } else { + tableNamesBuilder_.addAllMessages(other.tableNames_); + } } - onChanged(); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + for (int i = 0; i < getTableNamesCount(); i++) { + if (!getTableNames(i).isInitialized()) { + + return false; + } + } return true; } @@ -1169,8 +1303,9 @@ public final class MasterMonitorProtos { break; } case 10: { - ensureTableNamesIsMutable(); - tableNames_.add(input.readBytes()); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + input.readMessage(subBuilder, extensionRegistry); + addTableNames(subBuilder.buildPartial()); break; } } @@ -1179,60 +1314,190 @@ public final class MasterMonitorProtos { private int bitField0_; - // repeated string table_names = 1; - private com.google.protobuf.LazyStringList tableNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; + // repeated .TableName table_names = 1; + private java.util.List tableNames_ = + java.util.Collections.emptyList(); private void ensureTableNamesIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { - tableNames_ = new com.google.protobuf.LazyStringArrayList(tableNames_); + tableNames_ = new java.util.ArrayList(tableNames_); bitField0_ |= 0x00000001; } } - public java.util.List - getTableNamesList() { - return java.util.Collections.unmodifiableList(tableNames_); + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNamesBuilder_; + + public java.util.List getTableNamesList() { + if (tableNamesBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableNames_); + } else { + return tableNamesBuilder_.getMessageList(); + } } public int getTableNamesCount() { - return tableNames_.size(); + if (tableNamesBuilder_ == null) { + return tableNames_.size(); + } else { + return tableNamesBuilder_.getCount(); + } } - public String getTableNames(int index) 
{ - return tableNames_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableNames(int index) { + if (tableNamesBuilder_ == null) { + return tableNames_.get(index); + } else { + return tableNamesBuilder_.getMessage(index); + } } public Builder setTableNames( - int index, String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableNamesIsMutable(); - tableNames_.set(index, value); - onChanged(); + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNamesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNamesIsMutable(); + tableNames_.set(index, value); + onChanged(); + } else { + tableNamesBuilder_.setMessage(index, value); + } return this; } - public Builder addTableNames(String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableNamesIsMutable(); - tableNames_.add(value); - onChanged(); + public Builder setTableNames( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNamesBuilder_ == null) { + ensureTableNamesIsMutable(); + tableNames_.set(index, builderForValue.build()); + onChanged(); + } else { + tableNamesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + public Builder addTableNames(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNamesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNamesIsMutable(); + tableNames_.add(value); + onChanged(); + } else { + tableNamesBuilder_.addMessage(value); + } + return this; + } + public Builder addTableNames( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNamesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNamesIsMutable(); + tableNames_.add(index, value); + onChanged(); + } else { + tableNamesBuilder_.addMessage(index, value); + } + return this; + } + public Builder addTableNames( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNamesBuilder_ == null) { + ensureTableNamesIsMutable(); + tableNames_.add(builderForValue.build()); + onChanged(); + } else { + tableNamesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + public Builder addTableNames( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNamesBuilder_ == null) { + ensureTableNamesIsMutable(); + tableNames_.add(index, builderForValue.build()); + onChanged(); + } else { + tableNamesBuilder_.addMessage(index, builderForValue.build()); + } return this; } public Builder addAllTableNames( - java.lang.Iterable values) { - ensureTableNamesIsMutable(); - super.addAll(values, tableNames_); - onChanged(); + java.lang.Iterable values) { + if (tableNamesBuilder_ == null) { + ensureTableNamesIsMutable(); + super.addAll(values, tableNames_); + onChanged(); + } else { + tableNamesBuilder_.addAllMessages(values); + } return this; } public Builder clearTableNames() { - tableNames_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); + if (tableNamesBuilder_ == null) { + tableNames_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + tableNamesBuilder_.clear(); + } return this; } - void 
addTableNames(com.google.protobuf.ByteString value) { - ensureTableNamesIsMutable(); - tableNames_.add(value); - onChanged(); + public Builder removeTableNames(int index) { + if (tableNamesBuilder_ == null) { + ensureTableNamesIsMutable(); + tableNames_.remove(index); + onChanged(); + } else { + tableNamesBuilder_.remove(index); + } + return this; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNamesBuilder( + int index) { + return getTableNamesFieldBuilder().getBuilder(index); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNamesOrBuilder( + int index) { + if (tableNamesBuilder_ == null) { + return tableNames_.get(index); } else { + return tableNamesBuilder_.getMessageOrBuilder(index); + } + } + public java.util.List + getTableNamesOrBuilderList() { + if (tableNamesBuilder_ != null) { + return tableNamesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableNames_); + } + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNamesBuilder() { + return getTableNamesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNamesBuilder( + int index) { + return getTableNamesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + public java.util.List + getTableNamesBuilderList() { + return getTableNamesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNamesFieldBuilder() { + if (tableNamesBuilder_ == null) { + tableNamesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableNames_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + tableNames_ = null; + } + return tableNamesBuilder_; } // @@protoc_insertion_point(builder_scope:GetTableDescriptorsRequest) @@ -3068,26 +3333,27 @@ public final class MasterMonitorProtos { static { java.lang.String[] descriptorData = { "\n\023MasterMonitor.proto\032\014Master.proto\032\013hba" + - "se.proto\032\023ClusterStatus.proto\"1\n\033GetSche" + - "maAlterStatusRequest\022\022\n\ntable_name\030\001 \002(\014" + - "\"T\n\034GetSchemaAlterStatusResponse\022\035\n\025yet_" + - "to_update_regions\030\001 \001(\r\022\025\n\rtotal_regions" + - "\030\002 \001(\r\"1\n\032GetTableDescriptorsRequest\022\023\n\013" + - "table_names\030\001 \003(\t\"A\n\033GetTableDescriptors" + - "Response\022\"\n\014table_schema\030\001 \003(\0132\014.TableSc" + - "hema\"\031\n\027GetClusterStatusRequest\"B\n\030GetCl" + - "usterStatusResponse\022&\n\016cluster_status\030\001 ", - "\002(\0132\016.ClusterStatus2\314\002\n\024MasterMonitorSer" + - "vice\022S\n\024GetSchemaAlterStatus\022\034.GetSchema" + - "AlterStatusRequest\032\035.GetSchemaAlterStatu" + - "sResponse\022P\n\023GetTableDescriptors\022\033.GetTa" + - "bleDescriptorsRequest\032\034.GetTableDescript" + - 
"orsResponse\022G\n\020GetClusterStatus\022\030.GetClu" + - "sterStatusRequest\032\031.GetClusterStatusResp" + - "onse\022D\n\017IsMasterRunning\022\027.IsMasterRunnin" + - "gRequest\032\030.IsMasterRunningResponseBI\n*or" + - "g.apache.hadoop.hbase.protobuf.generated", - "B\023MasterMonitorProtosH\001\210\001\001\240\001\001" + "se.proto\032\023ClusterStatus.proto\"=\n\033GetSche" + + "maAlterStatusRequest\022\036\n\ntable_name\030\001 \002(\013" + + "2\n.TableName\"T\n\034GetSchemaAlterStatusResp" + + "onse\022\035\n\025yet_to_update_regions\030\001 \001(\r\022\025\n\rt" + + "otal_regions\030\002 \001(\r\"=\n\032GetTableDescriptor" + + "sRequest\022\037\n\013table_names\030\001 \003(\0132\n.TableNam" + + "e\"A\n\033GetTableDescriptorsResponse\022\"\n\014tabl" + + "e_schema\030\001 \003(\0132\014.TableSchema\"\031\n\027GetClust" + + "erStatusRequest\"B\n\030GetClusterStatusRespo", + "nse\022&\n\016cluster_status\030\001 \002(\0132\016.ClusterSta" + + "tus2\314\002\n\024MasterMonitorService\022S\n\024GetSchem" + + "aAlterStatus\022\034.GetSchemaAlterStatusReque" + + "st\032\035.GetSchemaAlterStatusResponse\022P\n\023Get" + + "TableDescriptors\022\033.GetTableDescriptorsRe" + + "quest\032\034.GetTableDescriptorsResponse\022G\n\020G" + + "etClusterStatus\022\030.GetClusterStatusReques" + + "t\032\031.GetClusterStatusResponse\022D\n\017IsMaster" + + "Running\022\027.IsMasterRunningRequest\032\030.IsMas" + + "terRunningResponseBI\n*org.apache.hadoop.", + "hbase.protobuf.generatedB\023MasterMonitorP" + + "rotosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java index e39dc10..815f3bf 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java @@ -1972,9 +1972,10 @@ public final class SecureBulkLoadProtos { public interface PrepareBulkLoadRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required bytes table_name = 1; + // required .TableName table_name = 1; boolean hasTableName(); - com.google.protobuf.ByteString getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); } public static final class PrepareBulkLoadRequest extends com.google.protobuf.GeneratedMessage @@ -2005,18 +2006,21 @@ public final class SecureBulkLoadProtos { } private int bitField0_; - // required bytes table_name = 1; + // required .TableName table_name = 1; public static final int TABLE_NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString tableName_; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { return tableName_; } private void initFields() { - 
tableName_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -2027,6 +2031,10 @@ public final class SecureBulkLoadProtos { memoizedIsInitialized = 0; return false; } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -2035,7 +2043,7 @@ public final class SecureBulkLoadProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, tableName_); + output.writeMessage(1, tableName_); } getUnknownFields().writeTo(output); } @@ -2048,7 +2056,7 @@ public final class SecureBulkLoadProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, tableName_); + .computeMessageSize(1, tableName_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -2199,6 +2207,7 @@ public final class SecureBulkLoadProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); } } private static Builder create() { @@ -2207,7 +2216,11 @@ public final class SecureBulkLoadProtos { public Builder clear() { super.clear(); - tableName_ = com.google.protobuf.ByteString.EMPTY; + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -2250,7 +2263,11 @@ public final class SecureBulkLoadProtos { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.tableName_ = tableName_; + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -2268,7 +2285,7 @@ public final class SecureBulkLoadProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest other) { if (other == org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest.getDefaultInstance()) return this; if (other.hasTableName()) { - setTableName(other.getTableName()); + mergeTableName(other.getTableName()); } this.mergeUnknownFields(other.getUnknownFields()); return this; @@ -2279,6 +2296,10 @@ public final class SecureBulkLoadProtos { return false; } + if (!getTableName().isInitialized()) { + + return false; + } return true; } @@ -2306,8 +2327,12 @@ public final class SecureBulkLoadProtos { break; } case 10: { - bitField0_ |= 0x00000001; - tableName_ = input.readBytes(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + if (hasTableName()) { + subBuilder.mergeFrom(getTableName()); + } + input.readMessage(subBuilder, extensionRegistry); + setTableName(subBuilder.buildPartial()); break; } } @@ -2316,29 +2341,95 @@ public final class SecureBulkLoadProtos { private int bitField0_; - // required bytes table_name = 1; - private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + // required .TableName table_name = 1; + private 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { - return tableName_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } } - public Builder setTableName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - tableName_ = value; - onChanged(); + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; return this; } public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - tableName_ = getDefaultInstance().getTableName(); - onChanged(); return this; } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } // @@protoc_insertion_point(builder_scope:PrepareBulkLoadRequest) } @@ -3887,19 +3978,19 @@ public final class SecureBulkLoadProtos { "FilesResponse\022\016\n\006loaded\030\001 \002(\010\"[\n\024Delegat" + "ionTokenProto\022\022\n\nidentifier\030\001 \001(\014\022\020\n\010pas" + "sword\030\002 \001(\014\022\014\n\004kind\030\003 \001(\t\022\017\n\007service\030\004 \001" + - "(\t\",\n\026PrepareBulkLoadRequest\022\022\n\ntable_na", - "me\030\001 \002(\014\"-\n\027PrepareBulkLoadResponse\022\022\n\nb" + - "ulk_token\030\001 \002(\t\",\n\026CleanupBulkLoadReques" + - "t\022\022\n\nbulk_token\030\001 \002(\t\"\031\n\027CleanupBulkLoad" + - "Response2\370\001\n\025SecureBulkLoadService\022D\n\017Pr" + - "epareBulkLoad\022\027.PrepareBulkLoadRequest\032\030" + - ".PrepareBulkLoadResponse\022S\n\024SecureBulkLo" + - "adHFiles\022\034.SecureBulkLoadHFilesRequest\032\035" + - ".SecureBulkLoadHFilesResponse\022D\n\017Cleanup" + - "BulkLoad\022\027.CleanupBulkLoadRequest\032\030.Clea" + - "nupBulkLoadResponseBJ\n*org.apache.hadoop", - ".hbase.protobuf.generatedB\024SecureBulkLoa" + - "dProtosH\001\210\001\001\240\001\001" + "(\t\"8\n\026PrepareBulkLoadRequest\022\036\n\ntable_na", + "me\030\001 \002(\0132\n.TableName\"-\n\027PrepareBulkLoadR" + + "esponse\022\022\n\nbulk_token\030\001 \002(\t\",\n\026CleanupBu" + + "lkLoadRequest\022\022\n\nbulk_token\030\001 \002(\t\"\031\n\027Cle" + + "anupBulkLoadResponse2\370\001\n\025SecureBulkLoadS" + + "ervice\022D\n\017PrepareBulkLoad\022\027.PrepareBulkL" + + "oadRequest\032\030.PrepareBulkLoadResponse\022S\n\024" + + "SecureBulkLoadHFiles\022\034.SecureBulkLoadHFi" + + "lesRequest\032\035.SecureBulkLoadHFilesRespons" + + "e\022D\n\017CleanupBulkLoad\022\027.CleanupBulkLoadRe" + + "quest\032\030.CleanupBulkLoadResponseBJ\n*org.a", + "pache.hadoop.hbase.protobuf.generatedB\024S" + + "ecureBulkLoadProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index db01207..46743b5 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -5037,9 +5037,10 @@ public final class ZooKeeperProtos { public interface TableLockOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional bytes table_name = 1; + // optional .TableName table_name = 1; boolean hasTableName(); - com.google.protobuf.ByteString getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); // optional .ServerName lock_owner = 2; boolean hasLockOwner(); @@ -5091,13 +5092,16 @@ public final class ZooKeeperProtos { } private int bitField0_; - // optional bytes table_name = 1; + // optional .TableName table_name = 1; public static final int TABLE_NAME_FIELD_NUMBER = 1; - private 
com.google.protobuf.ByteString tableName_; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { return tableName_; } @@ -5177,7 +5181,7 @@ public final class ZooKeeperProtos { } private void initFields() { - tableName_ = com.google.protobuf.ByteString.EMPTY; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); threadId_ = 0L; isShared_ = false; @@ -5189,6 +5193,12 @@ public final class ZooKeeperProtos { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (hasTableName()) { + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } if (hasLockOwner()) { if (!getLockOwner().isInitialized()) { memoizedIsInitialized = 0; @@ -5203,7 +5213,7 @@ public final class ZooKeeperProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, tableName_); + output.writeMessage(1, tableName_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeMessage(2, lockOwner_); @@ -5231,7 +5241,7 @@ public final class ZooKeeperProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, tableName_); + .computeMessageSize(1, tableName_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream @@ -5447,6 +5457,7 @@ public final class ZooKeeperProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); getLockOwnerFieldBuilder(); } } @@ -5456,7 +5467,11 @@ public final class ZooKeeperProtos { public Builder clear() { super.clear(); - tableName_ = com.google.protobuf.ByteString.EMPTY; + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); if (lockOwnerBuilder_ == null) { lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); @@ -5513,7 +5528,11 @@ public final class ZooKeeperProtos { if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.tableName_ = tableName_; + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } @@ -5555,7 +5574,7 @@ public final class ZooKeeperProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock other) { if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.getDefaultInstance()) return this; if (other.hasTableName()) { - setTableName(other.getTableName()); + mergeTableName(other.getTableName()); } if (other.hasLockOwner()) { mergeLockOwner(other.getLockOwner()); @@ -5577,6 +5596,12 @@ public final class 
ZooKeeperProtos { } public final boolean isInitialized() { + if (hasTableName()) { + if (!getTableName().isInitialized()) { + + return false; + } + } if (hasLockOwner()) { if (!getLockOwner().isInitialized()) { @@ -5610,8 +5635,12 @@ public final class ZooKeeperProtos { break; } case 10: { - bitField0_ |= 0x00000001; - tableName_ = input.readBytes(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(); + if (hasTableName()) { + subBuilder.mergeFrom(getTableName()); + } + input.readMessage(subBuilder, extensionRegistry); + setTableName(subBuilder.buildPartial()); break; } case 18: { @@ -5649,29 +5678,95 @@ public final class ZooKeeperProtos { private int bitField0_; - // optional bytes table_name = 1; - private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY; + // optional .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; public boolean hasTableName() { return ((bitField0_ & 0x00000001) == 0x00000001); } - public com.google.protobuf.ByteString getTableName() { - return tableName_; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } } - public Builder setTableName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - tableName_ = value; - onChanged(); + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; return this; } public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000001); - tableName_ = 
getDefaultInstance().getTableName(); - onChanged(); return this; } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } // optional .ServerName lock_owner = 2; private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); @@ -7093,16 +7188,17 @@ public final class ZooKeeperProtos { "licationState.State\"\"\n\005State\022\013\n\007ENABLED\020" + "\000\022\014\n\010DISABLED\020\001\"+\n\027ReplicationHLogPositi" + "on\022\020\n\010position\030\001 \002(\003\"%\n\017ReplicationLock\022", - "\022\n\nlock_owner\030\001 \002(\t\"\214\001\n\tTableLock\022\022\n\ntab" + - "le_name\030\001 \001(\014\022\037\n\nlock_owner\030\002 \001(\0132\013.Serv" + - "erName\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_shared\030\004" + - " \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001" + - "(\003\";\n\017StoreSequenceId\022\023\n\013family_name\030\001 \002" + - "(\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026RegionStoreSe" + - "quenceIds\022 \n\030last_flushed_sequence_id\030\001 " + - "\002(\004\022+\n\021store_sequence_id\030\002 \003(\0132\020.StoreSe" + - "quenceIdBE\n*org.apache.hadoop.hbase.prot" + - "obuf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001" + "\022\n\nlock_owner\030\001 \002(\t\"\230\001\n\tTableLock\022\036\n\ntab" + + "le_name\030\001 \001(\0132\n.TableName\022\037\n\nlock_owner\030" + + "\002 \001(\0132\013.ServerName\022\021\n\tthread_id\030\003 \001(\003\022\021\n" + + "\tis_shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013cre" + + "ate_time\030\006 \001(\003\";\n\017StoreSequenceId\022\023\n\013fam" + + "ily_name\030\001 \002(\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026R" + + "egionStoreSequenceIds\022 \n\030last_flushed_se" + + "quence_id\030\001 \002(\004\022+\n\021store_sequence_id\030\002 \003" + + "(\0132\020.StoreSequenceIdBE\n*org.apache.hadoo" + + "p.hbase.protobuf.generatedB\017ZooKeeperPro", + "tosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { diff --git a/hbase-protocol/src/main/protobuf/AccessControl.proto b/hbase-protocol/src/main/protobuf/AccessControl.proto index e866993..e361478 100644 --- 
a/hbase-protocol/src/main/protobuf/AccessControl.proto +++ b/hbase-protocol/src/main/protobuf/AccessControl.proto @@ -22,6 +22,8 @@ option java_generic_services = true; option java_generate_equals_and_hash = true; option optimize_for = SPEED; +import "hbase.proto"; + message Permission { enum Action { READ = 0; @@ -31,7 +33,7 @@ message Permission { ADMIN = 4; } repeated Action action = 1; - optional bytes table = 2; + optional TableName tableName = 2; optional bytes family = 3; optional bytes qualifier = 4; } @@ -70,7 +72,7 @@ message RevokeResponse { message UserPermissionsRequest { - optional bytes table = 1; + optional TableName tableName = 1; } message UserPermissionsResponse { diff --git a/hbase-protocol/src/main/protobuf/MasterAdmin.proto b/hbase-protocol/src/main/protobuf/MasterAdmin.proto index d9e41c0..05c288a 100644 --- a/hbase-protocol/src/main/protobuf/MasterAdmin.proto +++ b/hbase-protocol/src/main/protobuf/MasterAdmin.proto @@ -32,7 +32,7 @@ import "Client.proto"; /* Column-level protobufs */ message AddColumnRequest { - required bytes table_name = 1; + required TableName table_name = 1; required ColumnFamilySchema column_families = 2; } @@ -40,7 +40,7 @@ message AddColumnResponse { } message DeleteColumnRequest { - required bytes table_name = 1; + required TableName table_name = 1; required bytes column_name = 2; } @@ -48,7 +48,7 @@ message DeleteColumnResponse { } message ModifyColumnRequest { - required bytes table_name = 1; + required TableName table_name = 1; required ColumnFamilySchema column_families = 2; } @@ -110,34 +110,81 @@ message CreateTableResponse { } message DeleteTableRequest { - required bytes table_name = 1; + required TableName table_name = 1; } message DeleteTableResponse { } message EnableTableRequest { - required bytes table_name = 1; + required TableName table_name = 1; } message EnableTableResponse { } message DisableTableRequest { - required bytes table_name = 1; + required TableName table_name = 1; } message DisableTableResponse { } message ModifyTableRequest { - required bytes table_name = 1; + required TableName table_name = 1; required TableSchema table_schema = 2; } message ModifyTableResponse { } +/* Namespace-level protobufs */ + +message CreateNamespaceRequest { + required NamespaceDescriptor namespaceDescriptor = 1; +} + +message CreateNamespaceResponse { +} + +message DeleteNamespaceRequest { + required string namespaceName = 1; +} + +message DeleteNamespaceResponse { +} + +message ModifyNamespaceRequest { + required NamespaceDescriptor namespaceDescriptor = 1; +} + +message GetNamespaceDescriptorRequest { + required string namespaceName = 1; +} + +message GetNamespaceDescriptorResponse { + required NamespaceDescriptor namespaceDescriptor = 1; +} + +message ModifyNamespaceResponse { +} + +message ListNamespaceDescriptorsRequest { +} + +message ListNamespaceDescriptorsResponse { + repeated NamespaceDescriptor namespaceDescriptor = 1; +} + +message GetTableDescriptorsByNamespaceRequest { + required string namespaceName = 1; +} + +message GetTableDescriptorsByNamespaceResponse { + repeated TableSchema tableSchema = 1; +} + + /* Cluster-level protobufs */ @@ -382,6 +429,31 @@ service MasterAdminService { */ rpc IsRestoreSnapshotDone(IsRestoreSnapshotDoneRequest) returns(IsRestoreSnapshotDoneResponse); + /** return true if master is available */ rpc IsMasterRunning(IsMasterRunningRequest) returns(IsMasterRunningResponse); + + /** Modify a namespace's metadata */ + rpc ModifyNamespace(ModifyNamespaceRequest) + returns(ModifyNamespaceResponse); 
+ + /** Creates a new namespace synchronously */ + rpc CreateNamespace(CreateNamespaceRequest) + returns(CreateNamespaceResponse); + + /** Deletes a namespace synchronously */ + rpc DeleteNamespace(DeleteNamespaceRequest) + returns(DeleteNamespaceResponse); + + /** Get a namespace descriptor by name */ + rpc GetNamespaceDescriptor(GetNamespaceDescriptorRequest) + returns(GetNamespaceDescriptorResponse); + + /** returns a list of namespaces */ + rpc ListNamespaceDescriptors(ListNamespaceDescriptorsRequest) + returns(ListNamespaceDescriptorsResponse); + + /** returns a list of tables for a given namespace */ + rpc GetTableDescriptorsByNamespace(GetTableDescriptorsByNamespaceRequest) + returns(GetTableDescriptorsByNamespaceResponse); } diff --git a/hbase-protocol/src/main/protobuf/MasterMonitor.proto b/hbase-protocol/src/main/protobuf/MasterMonitor.proto index 44e42c5..6760bd9 100644 --- a/hbase-protocol/src/main/protobuf/MasterMonitor.proto +++ b/hbase-protocol/src/main/protobuf/MasterMonitor.proto @@ -29,7 +29,7 @@ import "hbase.proto"; import "ClusterStatus.proto"; message GetSchemaAlterStatusRequest { - required bytes table_name = 1; + required TableName table_name = 1; } message GetSchemaAlterStatusResponse { @@ -38,7 +38,7 @@ message GetSchemaAlterStatusResponse { } message GetTableDescriptorsRequest { - repeated string table_names = 1; + repeated TableName table_names = 1; } message GetTableDescriptorsResponse { diff --git a/hbase-protocol/src/main/protobuf/SecureBulkLoad.proto b/hbase-protocol/src/main/protobuf/SecureBulkLoad.proto index 7d5604b..9bfe07c 100644 --- a/hbase-protocol/src/main/protobuf/SecureBulkLoad.proto +++ b/hbase-protocol/src/main/protobuf/SecureBulkLoad.proto @@ -44,7 +44,7 @@ message DelegationTokenProto { } message PrepareBulkLoadRequest { - required bytes table_name = 1; + required TableName table_name = 1; } message PrepareBulkLoadResponse { diff --git a/hbase-protocol/src/main/protobuf/WAL.proto b/hbase-protocol/src/main/protobuf/WAL.proto index b6f6da6..d0551ff 100644 --- a/hbase-protocol/src/main/protobuf/WAL.proto +++ b/hbase-protocol/src/main/protobuf/WAL.proto @@ -34,7 +34,7 @@ message WALKey { required uint64 log_sequence_number = 3; required uint64 write_time = 4; optional UUID cluster_id = 5; - + repeated FamilyScope scopes = 6; optional uint32 following_kv_count = 7; /* diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto index ef3dcbf..7783f80 100644 --- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto +++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto @@ -144,7 +144,7 @@ message ReplicationLock { * Metadata associated with a table lock in zookeeper */ message TableLock { - optional bytes table_name = 1; + optional TableName table_name = 1; optional ServerName lock_owner = 2; optional int64 thread_id = 3; optional bool is_shared = 4; diff --git a/hbase-protocol/src/main/protobuf/hbase.proto b/hbase-protocol/src/main/protobuf/hbase.proto index 03e5921..08061e5 100644 --- a/hbase-protocol/src/main/protobuf/hbase.proto +++ b/hbase-protocol/src/main/protobuf/hbase.proto @@ -26,11 +26,19 @@ option optimize_for = SPEED; import "Cell.proto"; /** + * Table Name + */ +message TableName { + required bytes namespace = 1; + required bytes qualifier = 2; +} + +/** * Table Schema * Inspired by the rest TableSchema */ message TableSchema { - optional bytes name = 1; + optional TableName table_name = 1; repeated BytesBytesPair attributes = 2; repeated ColumnFamilySchema column_families = 3; repeated
NameStringPair configuration = 4; @@ -51,7 +59,7 @@ message ColumnFamilySchema { */ message RegionInfo { required uint64 region_id = 1; - required bytes table_name = 2; + required TableName table_name = 2; optional bytes start_key = 3; optional bytes end_key = 4; optional bool offline = 5; @@ -172,3 +180,8 @@ message UUID { required uint64 least_sig_bits = 1; required uint64 most_sig_bits = 2; } + +message NamespaceDescriptor { + required bytes name = 1; + repeated NameStringPair configuration = 2; +} diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index 455fe99..6bdd104 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -39,14 +39,17 @@ org.apache.hadoop.hbase.master.HMaster; org.apache.hadoop.hbase.master.AssignmentManager; org.apache.hadoop.hbase.master.ServerManager; org.apache.hadoop.hbase.HConstants; +org.apache.hadoop.hbase.NamespaceDescriptor; org.apache.hadoop.hbase.ServerLoad; org.apache.hadoop.hbase.ServerName; org.apache.hadoop.hbase.client.HBaseAdmin; org.apache.hadoop.hbase.client.HConnectionManager; org.apache.hadoop.hbase.HTableDescriptor; org.apache.hadoop.hbase.HBaseConfiguration; +org.apache.hadoop.hbase.TableName; org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; org.apache.hadoop.hbase.master.DeadServer; +org.apache.hadoop.hbase.protobuf.ProtobufUtil; <%if format.equals("json") %> <& ../common/TaskMonitorTmpl; filter = filter; format = "json" &> @@ -293,27 +296,39 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); - <%def catalogTables> +<%java> + HTableDescriptor[] sysTables = admin.getTableDescriptorsByNamespace(NamespaceDescriptor + .SYSTEM_NAMESPACE_NAME_STR); + + <%if (frags != null) %> - <%if (metaLocation != null) %> +<%for HTableDescriptor systemTable : sysTables%> - +<%java>TableName tableName = systemTable.getTableName(); + <%if (frags != null)%> - + - + <%java>String description = null; + if (tableName.equals(TableName.META_TABLE_NAME)){ + description = "The .META. table holds references to all User Table regions"; + } else { + description = "The .NAMESPACE. table holds information about namespaces."; + } + + - - +
System Tables Table NameFrag. Description
<% Bytes.toString(HConstants.META_TABLE_NAME) %><% tableName %><% frags.get(".META.") != null ? frags.get(".META.").intValue() + "%" : "n/a" %><% frags.get(tableName.getNameAsString()) != null ? frags.get(tableName.getNameAsString()) + .intValue() + "%" : "n/a" %> The .META. table holds references to all User Table regions<% description %>
@@ -333,11 +348,12 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); <%for HTableDescriptor htDesc : tables%> - ><% htDesc.getNameAsString() %> + ><% htDesc.getTableName().getNameAsString() %> <%if (frags != null) %> - <% frags.get(htDesc.getNameAsString()) != null ? frags.get(htDesc.getNameAsString()).intValue() + "%" : "n/a" %> + <% frags.get(htDesc.getTableName().getNameAsString()) != null ? frags.get(htDesc.getTableName().getNameAsString()).intValue() + "%" : "n/a" %> - <% master.getAssignmentManager().getRegionStates().getRegionsOfTable(htDesc.getName()).size() %> + <% master.getAssignmentManager().getRegionStates().getRegionsOfTable(htDesc + .getTableName()).size() %> <% htDesc.toStringCustomizedValues() %> @@ -358,9 +374,13 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); Creation Time <%for SnapshotDescription snapshotDesc : snapshots%> + <%java> + TableName snapshotTable = TableName.valueOf(snapshotDesc.getTable()); + <% snapshotDesc.getName() %> - <% snapshotDesc.getTable() %> + <% snapshotTable.getNameAsString() %> + <% new Date(snapshotDesc.getCreationTime()) %> diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java index b79f407..43be51c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java @@ -450,7 +450,7 @@ public class LocalHBaseCluster { cluster.startup(); HBaseAdmin admin = new HBaseAdmin(conf); HTableDescriptor htd = - new HTableDescriptor(Bytes.toBytes(cluster.getClass().getName())); + new HTableDescriptor(TableName.valueOf(cluster.getClass().getName())); admin.createTable(htd); cluster.shutdown(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java index e087626..0a8e817 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java @@ -31,19 +31,19 @@ import org.apache.hadoop.classification.InterfaceStability; @InterfaceStability.Evolving public interface TableDescriptors { /** - * @param tablename + * @param tableName * @return HTableDescriptor for tablename * @throws IOException */ - HTableDescriptor get(final String tablename) + HTableDescriptor get(final TableName tableName) throws IOException; /** - * @param tablename - * @return HTableDescriptor for tablename + * Get Map of all NamespaceDescriptors for a given namespace. + * @return Map of all descriptors. * @throws IOException */ - HTableDescriptor get(final byte[] tablename) + Map getByNamespace(String name) throws IOException; /** @@ -68,6 +68,6 @@ public interface TableDescriptors { * @return Instance of table descriptor or null if none found. * @throws IOException */ - HTableDescriptor remove(final String tablename) + HTableDescriptor remove(final TableName tablename) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java new file mode 100644 index 0000000..d012fea --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java @@ -0,0 +1,205 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import com.google.common.collect.Sets; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +import java.io.IOException; +import java.util.List; +import java.util.NavigableMap; +import java.util.NavigableSet; +import java.util.concurrent.ConcurrentSkipListMap; + + +/** + * Class serves two purposes: + * + * 1. Broadcast NamespaceDescriptor information via ZK + * (Done by the Master) + * 2. Consume broadcasted NamespaceDescriptor changes + * (Done by the RegionServers) + * + */ +@InterfaceAudience.Private +public class ZKNamespaceManager extends ZooKeeperListener { + private static Log LOG = LogFactory.getLog(ZKNamespaceManager.class); + private final String nsZNode; + private volatile NavigableMap cache; + + public ZKNamespaceManager(ZooKeeperWatcher zkw) throws IOException { + super(zkw); + nsZNode = ZooKeeperWatcher.namespaceZNode; + cache = new ConcurrentSkipListMap(); + } + + public void start() throws IOException { + watcher.registerListener(this); + try { + if (ZKUtil.watchAndCheckExists(watcher, nsZNode)) { + List existing = + ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); + if (existing != null) { + refreshNodes(existing); + } + } else { + ZKUtil.createWithParents(watcher, nsZNode); + } + } catch (KeeperException e) { + throw new IOException("Failed to initialize ZKNamespaceManager", e); + } + } + + public NamespaceDescriptor get(String name) { + return cache.get(name); + } + + public void update(NamespaceDescriptor ns) throws IOException { + writeNamespace(ns); + cache.put(ns.getName(), ns); + } + + public void remove(String name) throws IOException { + deleteNamespace(name); + cache.remove(name); + } + + public NavigableSet list() throws IOException { + NavigableSet ret = + Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR); + for(NamespaceDescriptor ns: cache.values()) { + ret.add(ns); + } + return ret; + } + + @Override + public void nodeCreated(String path) { + if (nsZNode.equals(path)) { + try { + List nodes = + ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); + refreshNodes(nodes); + } catch (KeeperException ke) { + String msg = "Error reading data from zookeeper"; + LOG.error(msg, ke); + watcher.abort(msg, ke); + } catch (IOException e) { + String msg = "Error parsing data from zookeeper"; + LOG.error(msg, e); + watcher.abort(msg,
e); + } + } + } + + @Override + public void nodeDeleted(String path) { + if (nsZNode.equals(ZKUtil.getParent(path))) { + String nsName = ZKUtil.getNodeName(path); + cache.remove(nsName); + } + } + + @Override + public void nodeDataChanged(String path) { + if (nsZNode.equals(ZKUtil.getParent(path))) { + try { + byte[] data = ZKUtil.getDataAndWatch(watcher, path); + NamespaceDescriptor ns = + ProtobufUtil.toNamespaceDescriptor( + HBaseProtos.NamespaceDescriptor.parseFrom(data)); + cache.put(ns.getName(), ns); + } catch (KeeperException ke) { + String msg = "Error reading data from zookeeper for node "+path; + LOG.error(msg, ke); + // only option is to abort + watcher.abort(msg, ke); + } catch (IOException ioe) { + String msg = "Error deserializing namespace: "+path; + LOG.error(msg, ioe); + watcher.abort(msg, ioe); + } + } + } + + @Override + public void nodeChildrenChanged(String path) { + if (nsZNode.equals(path)) { + try { + List nodes = + ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode); + refreshNodes(nodes); + } catch (KeeperException ke) { + LOG.error("Error reading data from zookeeper for path "+path, ke); + watcher.abort("Zookeeper error get node children for path "+path, ke); + } catch (IOException e) { + LOG.error("Error deserializing namespace child from: "+path, e); + watcher.abort("Error deserializing namespace child from: " + path, e); + } + } + } + + private void deleteNamespace(String name) throws IOException { + String zNode = ZKUtil.joinZNode(nsZNode, name); + try { + ZKUtil.deleteNode(watcher, zNode); + } catch (KeeperException e) { + LOG.error("Failed updating permissions for namespace "+name, e); + throw new IOException("Failed updating permissions for namespace "+name, e); + } + } + + private void writeNamespace(NamespaceDescriptor ns) throws IOException { + String zNode = ZKUtil.joinZNode(nsZNode, ns.getName()); + try { + ZKUtil.createWithParents(watcher, zNode); + ZKUtil.updateExistingNodeData(watcher, zNode, + ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray(), -1); + } catch (KeeperException e) { + LOG.error("Failed updating permissions for namespace "+ns.getName(), e); + throw new IOException("Failed updating permissions for namespace "+ns.getName(), e); + } + } + + private void refreshNodes(List nodes) throws IOException { + for (ZKUtil.NodeAndData n : nodes) { + if (n.isEmpty()) continue; + String path = n.getNode(); + String namespace = ZKUtil.getNodeName(path); + byte[] nodeData = n.getData(); + if (LOG.isDebugEnabled()) { + LOG.debug("Updating namespace cache from node "+namespace+" with data: "+ + Bytes.toStringBinary(nodeData)); + } + NamespaceDescriptor ns = + ProtobufUtil.toNamespaceDescriptor( + HBaseProtos.NamespaceDescriptor.parseFrom(nodeData)); + cache.put(ns.getName(), ns); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java index e9a6605..692be10 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java @@ -31,8 +31,8 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.regionserver.HRegion; import 
org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.util.Bytes; @@ -73,7 +73,7 @@ public class HFileArchiver { public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info) throws IOException { Path rootDir = FSUtils.getRootDir(conf); - archiveRegion(fs, rootDir, HTableDescriptor.getTableDir(rootDir, info.getTableName()), + archiveRegion(fs, rootDir, FSUtils.getTableDir(rootDir, info.getTableName()), HRegion.getRegionDir(rootDir, info)); } @@ -107,7 +107,9 @@ public class HFileArchiver { // make sure the regiondir lives under the tabledir Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString())); - Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir, tableDir, regionDir); + Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir, + FSUtils.getTableName(tableDir), + regionDir.getName()); FileStatusConverter getAsFile = new FileStatusConverter(fs); // otherwise, we attempt to archive the store files diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java index f601f09..fd81055 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintProcessor.java @@ -75,7 +75,7 @@ public class ConstraintProcessor extends BaseRegionObserver { if (LOG.isInfoEnabled()) { LOG.info("Finished loading " + constraints.size() - + " user Constraints on table: " + new String(desc.getName())); + + " user Constraints on table: " + desc.getTableName()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java index f32ef7a..e312b14 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java @@ -21,10 +21,12 @@ package org.apache.hadoop.hbase.coprocessor; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; @@ -59,156 +61,180 @@ public class BaseMasterObserver implements MasterObserver { @Override public void preDeleteTable(ObserverContext ctx, - byte[] tableName) throws IOException { + TableName tableName) throws IOException { } @Override public void postDeleteTable(ObserverContext ctx, - byte[] tableName) throws IOException { + TableName tableName) throws IOException { } @Override public void preDeleteTableHandler( - final ObserverContext ctx, byte[] tableName) + final ObserverContext ctx, TableName tableName) throws IOException{ } @Override public void postDeleteTableHandler( - final ObserverContext ctx, byte[] tableName) + final ObserverContext ctx, TableName tableName) throws IOException { } @Override public void preModifyTable(ObserverContext ctx, - byte[] tableName, 
HTableDescriptor htd) throws IOException { + TableName tableName, HTableDescriptor htd) throws IOException { } @Override public void postModifyTableHandler( - ObserverContext ctx, byte[] tableName, + ObserverContext ctx, TableName tableName, HTableDescriptor htd) throws IOException { } @Override public void preModifyTableHandler( - ObserverContext ctx, byte[] tableName, + ObserverContext ctx, TableName tableName, HTableDescriptor htd) throws IOException { } @Override public void postModifyTable(ObserverContext ctx, - byte[] tableName, HTableDescriptor htd) throws IOException { + TableName tableName, HTableDescriptor htd) throws IOException { + } + + @Override + public void preCreateNamespace(ObserverContext ctx, NamespaceDescriptor ns) throws IOException { + } + + @Override + public void postCreateNamespace(ObserverContext ctx, NamespaceDescriptor ns) throws IOException { + } + + @Override + public void preDeleteNamespace(ObserverContext ctx, String namespace) throws IOException { + } + + @Override + public void postDeleteNamespace(ObserverContext ctx, String namespace) throws IOException { + } + + @Override + public void preModifyNamespace(ObserverContext ctx, NamespaceDescriptor ns) throws IOException { + } + + @Override + public void postModifyNamespace(ObserverContext ctx, NamespaceDescriptor ns) throws IOException { } @Override public void preAddColumn(ObserverContext ctx, - byte[] tableName, HColumnDescriptor column) throws IOException { + TableName tableName, HColumnDescriptor column) throws IOException { } @Override public void postAddColumn(ObserverContext ctx, - byte[] tableName, HColumnDescriptor column) throws IOException { + TableName tableName, HColumnDescriptor column) throws IOException { } @Override public void preAddColumnHandler( - ObserverContext ctx, byte[] tableName, + ObserverContext ctx, TableName tableName, HColumnDescriptor column) throws IOException { } @Override public void postAddColumnHandler( - ObserverContext ctx, byte[] tableName, + ObserverContext ctx, TableName tableName, HColumnDescriptor column) throws IOException { } @Override public void preModifyColumn(ObserverContext ctx, - byte[] tableName, HColumnDescriptor descriptor) throws IOException { + TableName tableName, HColumnDescriptor descriptor) throws IOException { } @Override public void postModifyColumn(ObserverContext ctx, - byte[] tableName, HColumnDescriptor descriptor) throws IOException { + TableName tableName, HColumnDescriptor descriptor) throws IOException { } @Override public void preModifyColumnHandler( - ObserverContext ctx, byte[] tableName, + ObserverContext ctx, TableName tableName, HColumnDescriptor descriptor) throws IOException { } @Override public void postModifyColumnHandler( - ObserverContext ctx, byte[] tableName, + ObserverContext ctx, TableName tableName, HColumnDescriptor descriptor) throws IOException { } @Override public void preDeleteColumn(ObserverContext ctx, - byte[] tableName, byte[] c) throws IOException { + TableName tableName, byte[] c) throws IOException { } @Override public void postDeleteColumn(ObserverContext ctx, - byte[] tableName, byte[] c) throws IOException { + TableName tableName, byte[] c) throws IOException { } @Override public void preDeleteColumnHandler( - ObserverContext ctx, byte[] tableName, + ObserverContext ctx, TableName tableName, byte[] c) throws IOException { } @Override public void postDeleteColumnHandler( - ObserverContext ctx, byte[] tableName, + ObserverContext ctx, TableName tableName, byte[] c) throws IOException { } @Override public 
void preEnableTable(ObserverContext ctx, - byte[] tableName) throws IOException { + TableName tableName) throws IOException { } @Override public void postEnableTable(ObserverContext ctx, - byte[] tableName) throws IOException { + TableName tableName) throws IOException { } @Override public void preEnableTableHandler( - ObserverContext ctx, byte[] tableName) + ObserverContext ctx, TableName tableName) throws IOException { } @Override public void postEnableTableHandler( - ObserverContext ctx, byte[] tableName) + ObserverContext ctx, TableName tableName) throws IOException { } @Override public void preDisableTable(ObserverContext ctx, - byte[] tableName) throws IOException { + TableName tableName) throws IOException { } @Override public void postDisableTable(ObserverContext ctx, - byte[] tableName) throws IOException { + TableName tableName) throws IOException { } @Override public void preDisableTableHandler( - ObserverContext ctx, byte[] tableName) + ObserverContext ctx, TableName tableName) throws IOException { } @Override public void postDisableTableHandler( - ObserverContext ctx, byte[] tableName) + ObserverContext ctx, TableName tableName) throws IOException { } @@ -346,7 +372,8 @@ public class BaseMasterObserver implements MasterObserver { @Override public void preGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors) throws IOException { + List tableNamesList, List descriptors) + throws IOException { } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index 0448a38..05b9770 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -39,6 +39,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; @@ -366,10 +367,10 @@ public abstract class CoprocessorHost { */ class HTableWrapper implements HTableInterface { - private byte[] tableName; + private TableName tableName; private HTable table; - public HTableWrapper(byte[] tableName) throws IOException { + public HTableWrapper(TableName tableName) throws IOException { this.tableName = tableName; this.table = new HTable(conf, tableName); openTables.add(this); @@ -481,8 +482,14 @@ public abstract class CoprocessorHost { return table.getTableDescriptor(); } + @Override public byte[] getTableName() { - return tableName; + return tableName.getName(); + } + + @Override + public TableName getName() { + return table.getName(); } @Override @@ -667,7 +674,7 @@ public abstract class CoprocessorHost { * @exception java.io.IOException Exception */ @Override - public HTableInterface getTable(byte[] tableName) throws IOException { + public HTableInterface getTable(TableName tableName) throws IOException { return new HTableWrapper(tableName); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index 56dc067..e8e669e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -25,9 +25,11 @@ import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; @@ -96,7 +98,7 @@ public interface MasterObserver extends Coprocessor { * @param tableName the name of the table */ void preDeleteTable(final ObserverContext ctx, - byte[] tableName) throws IOException; + TableName tableName) throws IOException; /** * Called after the deleteTable operation has been requested. Called as part @@ -105,7 +107,7 @@ * @param tableName the name of the table */ void postDeleteTable(final ObserverContext ctx, - byte[] tableName) throws IOException; + TableName tableName) throws IOException; /** * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a @@ -116,7 +118,7 @@ * @param tableName the name of the table */ void preDeleteTableHandler( - final ObserverContext ctx, byte[] tableName) + final ObserverContext ctx, TableName tableName) throws IOException; /** @@ -128,7 +130,7 @@ * @param tableName the name of the table */ void postDeleteTableHandler( - final ObserverContext ctx, byte[] tableName) + final ObserverContext ctx, TableName tableName) throws IOException; /** @@ -140,7 +142,7 @@ * @param htd the HTableDescriptor */ void preModifyTable(final ObserverContext ctx, - final byte[] tableName, HTableDescriptor htd) throws IOException; + final TableName tableName, HTableDescriptor htd) throws IOException; /** * Called after the modifyTable operation has been requested. Called as part @@ -150,7 +152,7 @@ * @param htd the HTableDescriptor */ void postModifyTable(final ObserverContext ctx, - final byte[] tableName, HTableDescriptor htd) throws IOException; + final TableName tableName, HTableDescriptor htd) throws IOException; /** * Called prior to modifying a table's properties. Called as part of modify @@ -162,7 +164,7 @@ */ void preModifyTableHandler( final ObserverContext ctx, - final byte[] tableName, HTableDescriptor htd) throws IOException; + final TableName tableName, HTableDescriptor htd) throws IOException; /** * Called after modifying a table's properties. Called as part of modify @@ -174,7 +176,7 @@ */ void postModifyTableHandler( final ObserverContext ctx, - final byte[] tableName, HTableDescriptor htd) throws IOException; + final TableName tableName, HTableDescriptor htd) throws IOException; /** * Called prior to adding a new column family to the table.
Called as part of @@ -184,7 +186,7 @@ public interface MasterObserver extends Coprocessor { * @param column the HColumnDescriptor */ void preAddColumn(final ObserverContext ctx, - byte[] tableName, HColumnDescriptor column) throws IOException; + TableName tableName, HColumnDescriptor column) throws IOException; /** * Called after the new column family has been created. Called as part of @@ -194,7 +196,7 @@ public interface MasterObserver extends Coprocessor { * @param column the HColumnDescriptor */ void postAddColumn(final ObserverContext ctx, - byte[] tableName, HColumnDescriptor column) throws IOException; + TableName tableName, HColumnDescriptor column) throws IOException; /** * Called prior to adding a new column family to the table. Called as part of @@ -205,7 +207,7 @@ public interface MasterObserver extends Coprocessor { */ void preAddColumnHandler( final ObserverContext ctx, - byte[] tableName, HColumnDescriptor column) throws IOException; + TableName tableName, HColumnDescriptor column) throws IOException; /** * Called after the new column family has been created. Called as part of @@ -216,7 +218,7 @@ public interface MasterObserver extends Coprocessor { */ void postAddColumnHandler( final ObserverContext ctx, - byte[] tableName, HColumnDescriptor column) throws IOException; + TableName tableName, HColumnDescriptor column) throws IOException; /** * Called prior to modifying a column family's attributes. Called as part of @@ -226,7 +228,7 @@ public interface MasterObserver extends Coprocessor { * @param descriptor the HColumnDescriptor */ void preModifyColumn(final ObserverContext ctx, - byte [] tableName, HColumnDescriptor descriptor) throws IOException; + TableName tableName, HColumnDescriptor descriptor) throws IOException; /** * Called after the column family has been updated. Called as part of modify @@ -236,7 +238,7 @@ public interface MasterObserver extends Coprocessor { * @param descriptor the HColumnDescriptor */ void postModifyColumn(final ObserverContext ctx, - byte[] tableName, HColumnDescriptor descriptor) throws IOException; + TableName tableName, HColumnDescriptor descriptor) throws IOException; /** * Called prior to modifying a column family's attributes. Called as part of @@ -247,7 +249,7 @@ public interface MasterObserver extends Coprocessor { */ void preModifyColumnHandler( final ObserverContext ctx, - byte[] tableName, HColumnDescriptor descriptor) throws IOException; + TableName tableName, HColumnDescriptor descriptor) throws IOException; /** * Called after the column family has been updated. Called as part of modify @@ -258,7 +260,7 @@ public interface MasterObserver extends Coprocessor { */ void postModifyColumnHandler( final ObserverContext ctx, - byte[] tableName, HColumnDescriptor descriptor) throws IOException; + TableName tableName, HColumnDescriptor descriptor) throws IOException; /** @@ -269,7 +271,7 @@ public interface MasterObserver extends Coprocessor { * @param c the column */ void preDeleteColumn(final ObserverContext ctx, - final byte [] tableName, final byte[] c) throws IOException; + final TableName tableName, final byte[] c) throws IOException; /** * Called after the column family has been deleted. 
Called as part of delete @@ -279,7 +281,7 @@ public interface MasterObserver extends Coprocessor { * @param c the column */ void postDeleteColumn(final ObserverContext ctx, - final byte [] tableName, final byte[] c) throws IOException; + final TableName tableName, final byte[] c) throws IOException; /** * Called prior to deleting the entire column family. Called as part of @@ -290,7 +292,7 @@ public interface MasterObserver extends Coprocessor { */ void preDeleteColumnHandler( final ObserverContext ctx, - final byte[] tableName, final byte[] c) throws IOException; + final TableName tableName, final byte[] c) throws IOException; /** * Called after the column family has been deleted. Called as part of @@ -301,7 +303,7 @@ public interface MasterObserver extends Coprocessor { */ void postDeleteColumnHandler( final ObserverContext ctx, - final byte[] tableName, final byte[] c) throws IOException; + final TableName tableName, final byte[] c) throws IOException; /** * Called prior to enabling a table. Called as part of enable table RPC call. @@ -310,7 +312,7 @@ public interface MasterObserver extends Coprocessor { * @param tableName the name of the table */ void preEnableTable(final ObserverContext ctx, - final byte[] tableName) throws IOException; + final TableName tableName) throws IOException; /** * Called after the enableTable operation has been requested. Called as part @@ -319,7 +321,7 @@ public interface MasterObserver extends Coprocessor { * @param tableName the name of the table */ void postEnableTable(final ObserverContext ctx, - final byte[] tableName) throws IOException; + final TableName tableName) throws IOException; /** * Called prior to enabling a table. Called as part of enable table handler @@ -330,7 +332,7 @@ public interface MasterObserver extends Coprocessor { */ void preEnableTableHandler( final ObserverContext ctx, - final byte[] tableName) throws IOException; + final TableName tableName) throws IOException; /** * Called after the enableTable operation has been requested. Called as part @@ -340,7 +342,7 @@ public interface MasterObserver extends Coprocessor { */ void postEnableTableHandler( final ObserverContext ctx, - final byte[] tableName) throws IOException; + final TableName tableName) throws IOException; /** * Called prior to disabling a table. Called as part of disable table RPC @@ -350,7 +352,7 @@ public interface MasterObserver extends Coprocessor { * @param tableName the name of the table */ void preDisableTable(final ObserverContext ctx, - final byte[] tableName) throws IOException; + final TableName tableName) throws IOException; /** * Called after the disableTable operation has been requested. Called as part @@ -359,7 +361,7 @@ public interface MasterObserver extends Coprocessor { * @param tableName the name of the table */ void postDisableTable(final ObserverContext ctx, - final byte[] tableName) throws IOException; + final TableName tableName) throws IOException; /** * Called prior to disabling a table. Called as part of disable table handler @@ -370,7 +372,7 @@ public interface MasterObserver extends Coprocessor { */ void preDisableTableHandler( final ObserverContext ctx, - final byte[] tableName) throws IOException; + final TableName tableName) throws IOException; /** * Called after the disableTable operation has been requested. 
Called as part @@ -380,7 +382,7 @@ public interface MasterObserver extends Coprocessor { */ void postDisableTableHandler( final ObserverContext ctx, - final byte[] tableName) throws IOException; + final TableName tableName) throws IOException; /** * Called prior to moving a given region from one region server to another. @@ -619,7 +621,8 @@ public interface MasterObserver extends Coprocessor { * @throws IOException */ void preGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors) throws IOException; + List tableNamesList, + List descriptors) throws IOException; /** * Called after a getTableDescriptors request has been processed. @@ -629,4 +632,58 @@ public interface MasterObserver extends Coprocessor { */ void postGetTableDescriptors(ObserverContext ctx, List descriptors) throws IOException; + + /** + * Called before a new namespace is created by + * {@link org.apache.hadoop.hbase.master.HMaster}. + * It can't bypass the default action, e.g., ctx.bypass() won't have effect. + * @param ctx the environment to interact with the framework and master + * @param ns the NamespaceDescriptor for the table + * @throws IOException + */ + void preCreateNamespace(final ObserverContext ctx, + NamespaceDescriptor ns) throws IOException; + /** + * Called after the createNamespace operation has been requested. + * @param ctx the environment to interact with the framework and master + * @param ns the NamespaceDescriptor for the table + * @throws IOException + */ + void postCreateNamespace(final ObserverContext ctx, + NamespaceDescriptor ns) throws IOException; + + /** + * Called before {@link org.apache.hadoop.hbase.master.HMaster} deletes a + * namespace + * It can't bypass the default action, e.g., ctx.bypass() won't have effect. + * @param ctx the environment to interact with the framework and master + * @param namespace the name of the namespace + */ + void preDeleteNamespace(final ObserverContext ctx, + String namespace) throws IOException; + + /** + * Called after the deleteNamespace operation has been requested. + * @param ctx the environment to interact with the framework and master + * @param namespace the name of the namespace + */ + void postDeleteNamespace(final ObserverContext ctx, + String namespace) throws IOException; + + /** + * Called prior to modifying a namespace's properties. + * It can't bypass the default action, e.g., ctx.bypass() won't have effect. + * @param ctx the environment to interact with the framework and master + * @param ns the NamespaceDescriptor + */ + void preModifyNamespace(final ObserverContext ctx, + NamespaceDescriptor ns) throws IOException; + + /** + * Called after the modifyNamespace operation has been requested. + * @param ctx the environment to interact with the framework and master + * @param ns the NamespaceDescriptor + */ + void postModifyNamespace(final ObserverContext ctx, + NamespaceDescriptor ns) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java index b392584..c4777e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java @@ -262,7 +262,7 @@ policy implementations, perhaps) ahead of observers. 
"TestClassloading.jar"); // create a table that references the jar - HTableDescriptor htd = new HTableDescriptor(getClass().getName()); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(getClass().getTableName())); htd.addFamily(new HColumnDescriptor("test")); htd.setValue("Coprocessor$1", path.toString() + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java index e7af169..7c11927 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java @@ -28,13 +28,14 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; +import org.apache.hadoop.hbase.util.Pair; /** * HFileLink describes a link to an hfile. @@ -66,12 +67,15 @@ public class HFileLink extends FileLink { * and the bulk loaded (_SeqId_[0-9]+_) hfiles. */ public static final String LINK_NAME_REGEX = - String.format("%s=%s-%s", HTableDescriptor.VALID_USER_TABLE_REGEX, + String.format("(?:(?:%s=)?)%s=%s-%s", + TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX, HRegionInfo.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX); /** Define the HFile Link name parser in the form of: table=region-hfile */ - private static final Pattern LINK_NAME_PATTERN = - Pattern.compile(String.format("^(%s)=(%s)-(%s)$", HTableDescriptor.VALID_USER_TABLE_REGEX, + //made package private for testing + static final Pattern LINK_NAME_PATTERN = + Pattern.compile(String.format("^(?:(%s)(?:\\=))?(%s)=(%s)-(%s)$", + TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX, HRegionInfo.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX)); /** @@ -79,7 +83,8 @@ public class HFileLink extends FileLink { * that can be found in /hbase/table/region/family/ */ private static final Pattern REF_OR_HFILE_LINK_PATTERN = - Pattern.compile(String.format("^(%s)=(%s)-(.+)$", HTableDescriptor.VALID_USER_TABLE_REGEX, + Pattern.compile(String.format("^(?:(%s)(?:=))?(%s)=(%s)-(.+)$", + TableName.VALID_NAMESPACE_REGEX, TableName.VALID_TABLE_QUALIFIER_REGEX, HRegionInfo.ENCODED_REGION_NAME_REGEX)); private final Path archivePath; @@ -138,8 +143,7 @@ public class HFileLink extends FileLink { public static boolean isHFileLink(String fileName) { Matcher m = LINK_NAME_PATTERN.matcher(fileName); if (!m.matches()) return false; - - return m.groupCount() > 2 && m.group(3) != null && m.group(2) != null && m.group(1) != null; + return m.groupCount() > 2 && m.group(4) != null && m.group(3) != null && m.group(2) != null; } /** @@ -159,11 +163,13 @@ public class HFileLink extends FileLink { } // Convert the HFileLink name into a real table/region/cf/hfile path. 
- String tableName = m.group(1); - String regionName = m.group(2); - String hfileName = m.group(3); + TableName tableName = TableName.valueOf(m.group(1), m.group(2)); + String regionName = m.group(3); + String hfileName = m.group(4); String familyName = path.getParent().getName(); - return new Path(new Path(tableName, regionName), new Path(familyName, hfileName)); + Path tableDir = FSUtils.getTableDir(new Path("./"), tableName); + return new Path(tableDir, new Path(regionName, new Path(familyName, + hfileName))); } /** @@ -177,7 +183,7 @@ public class HFileLink extends FileLink { if (!m.matches()) { throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!"); } - return(m.group(3)); + return(m.group(4)); } /** @@ -191,7 +197,7 @@ public class HFileLink extends FileLink { if (!m.matches()) { throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!"); } - return(m.group(2)); + return(m.group(3)); } /** @@ -200,12 +206,12 @@ public class HFileLink extends FileLink { * @param fileName HFileLink file name * @return the name of the referenced Table */ - public static String getReferencedTableName(final String fileName) { + public static TableName getReferencedTableName(final String fileName) { Matcher m = REF_OR_HFILE_LINK_PATTERN.matcher(fileName); if (!m.matches()) { throw new IllegalArgumentException(fileName + " is not a valid HFileLink name!"); } - return(m.group(1)); + return(TableName.valueOf(m.group(1), m.group(2))); } /** @@ -217,7 +223,7 @@ public class HFileLink extends FileLink { */ public static String createHFileLinkName(final HRegionInfo hfileRegionInfo, final String hfileName) { - return createHFileLinkName(hfileRegionInfo.getTableNameAsString(), + return createHFileLinkName(hfileRegionInfo.getTableName(), hfileRegionInfo.getEncodedName(), hfileName); } @@ -229,9 +235,12 @@ public class HFileLink extends FileLink { * @param hfileName - Linked HFile name * @return file name of the HFile Link */ - public static String createHFileLinkName(final String tableName, + public static String createHFileLinkName(final TableName tableName, final String regionName, final String hfileName) { - return String.format("%s=%s-%s", tableName, regionName, hfileName); + String s = String.format("%s=%s-%s", + tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '='), + regionName, hfileName); + return s; } /** @@ -251,7 +260,7 @@ public class HFileLink extends FileLink { public static boolean create(final Configuration conf, final FileSystem fs, final Path dstFamilyPath, final HRegionInfo hfileRegionInfo, final String hfileName) throws IOException { - String linkedTable = hfileRegionInfo.getTableNameAsString(); + TableName linkedTable = hfileRegionInfo.getTableName(); String linkedRegion = hfileRegionInfo.getEncodedName(); return create(conf, fs, dstFamilyPath, linkedTable, linkedRegion, hfileName); } @@ -272,11 +281,12 @@ public class HFileLink extends FileLink { * @throws IOException on file or parent directory creation failure */ public static boolean create(final Configuration conf, final FileSystem fs, - final Path dstFamilyPath, final String linkedTable, final String linkedRegion, + final Path dstFamilyPath, final TableName linkedTable, final String linkedRegion, final String hfileName) throws IOException { String familyName = dstFamilyPath.getName(); String regionName = dstFamilyPath.getParent().getName(); - String tableName = dstFamilyPath.getParent().getParent().getName(); + String tableName = 
FSUtils.getTableName(dstFamilyPath.getParent().getParent()) + .getNameAsString(); String name = createHFileLinkName(linkedTable, linkedRegion, hfileName); String refName = createBackReferenceName(tableName, regionName); @@ -323,14 +333,18 @@ public class HFileLink extends FileLink { if (!m.matches()) { throw new IllegalArgumentException(hfileLinkName + " is not a valid HFileLink name!"); } - return create(conf, fs, dstFamilyPath, m.group(1), m.group(2), m.group(3)); + return create(conf, fs, dstFamilyPath, TableName.valueOf(m.group(1), m.group(2)), + m.group(3), m.group(4)); } /** * Create the back reference name */ - private static String createBackReferenceName(final String tableName, final String regionName) { - return regionName + "." + tableName; + //package-private for testing + static String createBackReferenceName(final String tableNameStr, + final String regionName) { + + return regionName + "." + tableNameStr.replace(TableName.NAMESPACE_DELIM, '='); } /** @@ -342,20 +356,31 @@ public class HFileLink extends FileLink { * @throws IOException on unexpected error. */ public static Path getHFileFromBackReference(final Path rootDir, final Path linkRefPath) { - int separatorIndex = linkRefPath.getName().indexOf('.'); - String linkRegionName = linkRefPath.getName().substring(0, separatorIndex); - String linkTableName = linkRefPath.getName().substring(separatorIndex + 1); + Pair p = parseBackReferenceName(linkRefPath.getName()); + TableName linkTableName = p.getFirst(); + String linkRegionName = p.getSecond(); + String hfileName = getBackReferenceFileName(linkRefPath.getParent()); Path familyPath = linkRefPath.getParent().getParent(); Path regionPath = familyPath.getParent(); Path tablePath = regionPath.getParent(); - String linkName = createHFileLinkName(tablePath.getName(), regionPath.getName(), hfileName); - Path linkTableDir = FSUtils.getTablePath(rootDir, linkTableName); + String linkName = createHFileLinkName(FSUtils.getTableName(tablePath), + regionPath.getName(), hfileName); + Path linkTableDir = FSUtils.getTableDir(rootDir, linkTableName); Path regionDir = HRegion.getRegionDir(linkTableDir, linkRegionName); return new Path(new Path(regionDir, familyPath.getName()), linkName); } + static Pair parseBackReferenceName(String name) { + int separatorIndex = name.indexOf('.'); + String linkRegionName = name.substring(0, separatorIndex); + String tableSubstr = name.substring(separatorIndex + 1) + .replace('=', TableName.NAMESPACE_DELIM); + TableName linkTableName = TableName.valueOf(tableSubstr); + return new Pair(linkTableName, linkRegionName); + } + /** * Get the full path of the HFile referenced by the back reference * @@ -368,4 +393,5 @@ public class HFileLink extends FileLink { throws IOException { return getHFileFromBackReference(FSUtils.getRootDir(conf), linkRefPath); } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 93e730b..eca2ec1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -38,6 +38,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import 
org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; @@ -140,7 +141,7 @@ public class HFilePrettyPrinter { byte[] rn = Bytes.toBytes(regionName); byte[][] hri = HRegionInfo.parseRegionName(rn); Path rootDir = FSUtils.getRootDir(conf); - Path tableDir = new Path(rootDir, Bytes.toString(hri[0])); + Path tableDir = FSUtils.getTableDir(rootDir, TableName.valueOf(hri[0])); String enc = HRegionInfo.encodeRegionName(rn); Path regionDir = new Path(tableDir, enc); if (verbose) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index bf8bb44..842363d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -66,6 +66,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -2138,9 +2139,9 @@ public class RpcServer implements RpcServerInterface { params[1] instanceof Operation) { // if the slow process is a query, we want to log its table as well // as its own fingerprint - byte [] tableName = - HRegionInfo.parseRegionName((byte[]) params[0])[0]; - responseInfo.put("table", Bytes.toStringBinary(tableName)); + TableName tableName = TableName.valueOf( + HRegionInfo.parseRegionName((byte[]) params[0])[0]); + responseInfo.put("table", tableName.getNameAsString()); // annotate the response map with operation details responseInfo.putAll(((Operation) params[1]).toMap()); // report to the log file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java index af81f0e..99de91c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java @@ -135,7 +135,7 @@ implements InputFormat { lastPos = startKeys.length % realNumSplits > i ? lastPos + 1 : lastPos; String regionLocation = table.getRegionLocation(startKeys[startPos]). getHostname(); - splits[i] = new TableSplit(this.table.getTableName(), + splits[i] = new TableSplit(this.table.getName(), startKeys[startPos], ((i + 1) < realNumSplits) ? 
startKeys[lastPos]: HConstants.EMPTY_START_ROW, regionLocation); LOG.info("split: " + i + "->" + splits[i]); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java index 3ae3b4c..b4ccaa9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java @@ -22,6 +22,7 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapred.InputSplit; @@ -31,14 +32,14 @@ import org.apache.hadoop.mapred.InputSplit; */ @Deprecated public class TableSplit implements InputSplit, Comparable { - private byte [] m_tableName; + private TableName m_tableName; private byte [] m_startRow; private byte [] m_endRow; private String m_regionLocation; /** default constructor */ public TableSplit() { - this(HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, + this((TableName)null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, ""); } @@ -49,7 +50,7 @@ public class TableSplit implements InputSplit, Comparable { * @param endRow * @param location */ - public TableSplit(byte [] tableName, byte [] startRow, byte [] endRow, + public TableSplit(TableName tableName, byte [] startRow, byte [] endRow, final String location) { this.m_tableName = tableName; this.m_startRow = startRow; @@ -57,11 +58,21 @@ public class TableSplit implements InputSplit, Comparable { this.m_regionLocation = location; } + public TableSplit(byte [] tableName, byte [] startRow, byte [] endRow, + final String location) { + this(TableName.valueOf(tableName), startRow, endRow, location); + } + /** @return table name */ - public byte [] getTableName() { + public TableName getTable() { return this.m_tableName; } + /** @return table name */ + public byte [] getTableName() { + return this.m_tableName.getName(); + } + /** @return starting row key */ public byte [] getStartRow() { return this.m_startRow; @@ -87,14 +98,14 @@ public class TableSplit implements InputSplit, Comparable { } public void readFields(DataInput in) throws IOException { - this.m_tableName = Bytes.readByteArray(in); + this.m_tableName = TableName.valueOf(Bytes.readByteArray(in)); this.m_startRow = Bytes.readByteArray(in); this.m_endRow = Bytes.readByteArray(in); this.m_regionLocation = Bytes.toString(Bytes.readByteArray(in)); } public void write(DataOutput out) throws IOException { - Bytes.writeByteArray(out, this.m_tableName); + Bytes.writeByteArray(out, this.m_tableName.getName()); Bytes.writeByteArray(out, this.m_startRow); Bytes.writeByteArray(out, this.m_endRow); Bytes.writeByteArray(out, Bytes.toBytes(this.m_regionLocation)); @@ -117,7 +128,7 @@ public class TableSplit implements InputSplit, Comparable { return false; } TableSplit other = (TableSplit)o; - return Bytes.equals(m_tableName, other.m_tableName) && + return m_tableName.equals(other.m_tableName) && Bytes.equals(m_startRow, other.m_startRow) && Bytes.equals(m_endRow, other.m_endRow) && m_regionLocation.equals(other.m_regionLocation); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java index b2d4b30..0e4a494 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; @@ -358,7 +359,7 @@ public class ImportTsv extends Configured implements Tool { private static void createTable(HBaseAdmin admin, String tableName, String[] columns) throws IOException { - HTableDescriptor htd = new HTableDescriptor(tableName.getBytes()); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); Set cfSet = new HashSet(); for (String aColumn : columns) { if (TsvParser.ROWKEY_COLUMN_SPEC.equals(aColumn)) continue; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index 7187fa5..076218a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -51,6 +51,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -61,7 +62,6 @@ import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.RegionServerCallable; -import org.apache.hadoop.hbase.client.RpcRetryingCaller; import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; import org.apache.hadoop.hbase.client.coprocessor.SecureBulkLoadClient; import org.apache.hadoop.hbase.io.HalfStoreFileReader; @@ -200,7 +200,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { { final HConnection conn = table.getConnection(); - if (!conn.isTableAvailable(table.getTableName())) { + if (!conn.isTableAvailable(table.getName())) { throw new TableNotFoundException("Table " + Bytes.toStringBinary(table.getTableName()) + "is not currently available."); @@ -261,7 +261,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { if(User.isSecurityEnabled()) { userToken = fs.getDelegationToken("renewer"); } - bulkToken = new SecureBulkLoadClient(table).prepareBulkLoad(table.getTableName()); + bulkToken = new SecureBulkLoadClient(table).prepareBulkLoad(table.getName()); } // Assumes that region splits can happen while this occurs. 
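The same byte[]-to-TableName migration shows up in every caller touched above: descriptors are built from a TableName, and HTable.getName() supersedes the raw-byte getTableName(). A brief before/after sketch with invented names ("myns", "mytable", "cf"):

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HTable;

    public class TableNameMigrationSketch {
      // Before: new HTableDescriptor(Bytes.toBytes("mytable")); after: build it
      // from a TableName. "myns", "mytable" and "cf" are invented for the example.
      static HTableDescriptor describeTable() {
        TableName tn = TableName.valueOf("myns", "mytable");
        HTableDescriptor htd = new HTableDescriptor(tn);
        htd.addFamily(new HColumnDescriptor("cf"));
        return htd;
      }

      // Before: table.getTableName() returned byte[]; getName() now returns the
      // TableName value object that the bulk-load code above passes around.
      static TableName nameOf(HTable table) {
        return table.getName();
      }
    }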
@@ -339,7 +339,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { final Callable> call = new Callable>() { public List call() throws Exception { - List toRetry = tryAtomicRegionLoad(conn, table.getTableName(), first, lqis); + List toRetry = + tryAtomicRegionLoad(conn, table.getName(), first, lqis); return toRetry; } }; @@ -420,8 +421,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } // unique file name for the table - String getUniqueName(byte[] tableName) { - String name = Bytes.toStringBinary(tableName) + "," + regionCount.incrementAndGet(); + String getUniqueName(TableName tableName) { + String name = tableName + "," + regionCount.incrementAndGet(); return name; } @@ -437,7 +438,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { LOG.info("HFile at " + hfilePath + " no longer fits inside a single " + "region. Splitting..."); - String uniqueName = getUniqueName(table.getTableName()); + String uniqueName = getUniqueName(table.getName()); HColumnDescriptor familyDesc = table.getTableDescriptor().getFamily(item.family); Path botOut = new Path(tmpDir, uniqueName + ".bottom"); Path topOut = new Path(tmpDir, uniqueName + ".top"); @@ -530,7 +531,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * failure */ protected List tryAtomicRegionLoad(final HConnection conn, - byte[] tableName, final byte[] first, Collection lqis) throws IOException { + final TableName tableName, + final byte[] first, Collection lqis) throws IOException { final List> famPaths = new ArrayList>(lqis.size()); @@ -595,7 +597,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { if (!success) { LOG.warn("Attempt to bulk load region containing " + Bytes.toStringBinary(first) + " into table " - + Bytes.toStringBinary(tableName) + " with files " + lqis + + tableName + " with files " + lqis + " failed. This is recoverable and they will be retried."); toRetry.addAll(lqis); // return lqi's to retry } @@ -678,7 +680,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { return !HFile.isReservedFileInfoKey(key); } - private boolean doesTableExist(String tableName) throws Exception { + private boolean doesTableExist(TableName tableName) throws Exception { return hbAdmin.tableExists(tableName); } @@ -716,7 +718,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * If the table is created for the first time, then "completebulkload" reads the files twice. * More modifications necessary if we want to avoid doing it. 
*/ - private void createTable(String tableName, String dirPath) throws Exception { + private void createTable(TableName tableName, String dirPath) throws Exception { Path hfofDir = new Path(dirPath); FileSystem fs = hfofDir.getFileSystem(getConf()); @@ -797,7 +799,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { } String dirPath = args[0]; - String tableName = args[1]; + TableName tableName = TableName.valueOf(args[1]); boolean tableExists = this.doesTableExist(tableName); if (!tableExists) this.createTable(tableName,dirPath); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java index ff1685a..75d0031 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java @@ -152,7 +152,8 @@ public abstract class MultiTableInputFormatBase extends stopRow) <= 0) && keys.getSecond()[i].length > 0 ? keys .getSecond()[i] : stopRow; InputSplit split = - new TableSplit(tableName, scan, splitStart, splitStop, regionLocation); + new TableSplit(table.getName(), + scan, splitStart, splitStop, regionLocation); splits.add(split); if (LOG.isDebugEnabled()) LOG.debug("getSplits: split -> " + (count++) + " -> " + split); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java index 2062584..c90ce6b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java @@ -166,7 +166,7 @@ extends InputFormat { throw new IOException("Expecting at least one region."); } List splits = new ArrayList(1); - InputSplit split = new TableSplit(table.getTableName(), + InputSplit split = new TableSplit(table.getName(), HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc .getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0]); splits.add(split); @@ -206,7 +206,7 @@ extends InputFormat { Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) && keys.getSecond()[i].length > 0 ? 
keys.getSecond()[i] : stopRow; - InputSplit split = new TableSplit(table.getTableName(), + InputSplit split = new TableSplit(table.getName(), splitStart, splitStop, regionLocation); splits.add(split); if (LOG.isDebugEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java index efd36de..5d11ed9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java @@ -27,6 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; @@ -76,7 +77,7 @@ implements Writable, Comparable { } private static final Version VERSION = Version.INITIAL; - private byte [] tableName; + private TableName tableName; private byte [] startRow; private byte [] endRow; private String regionLocation; @@ -84,7 +85,7 @@ implements Writable, Comparable { /** Default constructor. */ public TableSplit() { - this(HConstants.EMPTY_BYTE_ARRAY, null, HConstants.EMPTY_BYTE_ARRAY, + this(null, null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, ""); } @@ -97,7 +98,7 @@ implements Writable, Comparable { * @param endRow The end row of the split. * @param location The location of the region. */ - public TableSplit(byte [] tableName, Scan scan, byte [] startRow, byte [] endRow, + public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow, final String location) { this.tableName = tableName; try { @@ -119,7 +120,7 @@ implements Writable, Comparable { * @param endRow The end row of the split. * @param location The location of the region. */ - public TableSplit(byte[] tableName, byte[] startRow, byte[] endRow, + public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location) { this(tableName, null, startRow, endRow, location); } @@ -139,7 +140,7 @@ implements Writable, Comparable { * * @return The table name. 
*/ - public byte [] getTableName() { + public TableName getTableName() { return tableName; } @@ -216,8 +217,9 @@ implements Writable, Comparable { version = Version.fromCode(len); len = WritableUtils.readVInt(in); } - tableName = new byte[len]; - in.readFully(tableName); + byte[] tableNameBytes = new byte[len]; + in.readFully(tableNameBytes); + tableName = TableName.valueOf(tableNameBytes); startRow = Bytes.readByteArray(in); endRow = Bytes.readByteArray(in); regionLocation = Bytes.toString(Bytes.readByteArray(in)); @@ -235,7 +237,7 @@ implements Writable, Comparable { @Override public void write(DataOutput out) throws IOException { WritableUtils.writeVInt(out, VERSION.code); - Bytes.writeByteArray(out, tableName); + Bytes.writeByteArray(out, tableName.getName()); Bytes.writeByteArray(out, startRow); Bytes.writeByteArray(out, endRow); Bytes.writeByteArray(out, Bytes.toBytes(regionLocation)); @@ -266,7 +268,7 @@ implements Writable, Comparable { // If The table name of the two splits is the same then compare start row // otherwise compare based on table names int tableNameComparison = - Bytes.compareTo(getTableName(), split.getTableName()); + getTableName().compareTo(split.getTableName()); return tableNameComparison != 0 ? tableNameComparison : Bytes.compareTo( getStartRow(), split.getStartRow()); } @@ -276,7 +278,7 @@ implements Writable, Comparable { if (o == null || !(o instanceof TableSplit)) { return false; } - return Bytes.equals(tableName, ((TableSplit)o).tableName) && + return tableName.equals(((TableSplit)o).tableName) && Bytes.equals(startRow, ((TableSplit)o).startRow) && Bytes.equals(endRow, ((TableSplit)o).endRow) && regionLocation.equals(((TableSplit)o).regionLocation); @@ -284,7 +286,7 @@ implements Writable, Comparable { @Override public int hashCode() { - int result = tableName != null ? Arrays.hashCode(tableName) : 0; + int result = tableName != null ? tableName.hashCode() : 0; result = 31 * result + (scan != null ? scan.hashCode() : 0); result = 31 * result + (startRow != null ? Arrays.hashCode(startRow) : 0); result = 31 * result + (endRow != null ? 
Arrays.hashCode(endRow) : 0); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index ec34ac5..368858d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Delete; @@ -79,7 +80,7 @@ public class WALPlayer extends Configured implements Tool { throws IOException { try { // skip all other tables - if (Bytes.equals(table, key.getTablename())) { + if (Bytes.equals(table, key.getTablename().getName())) { for (KeyValue kv : value.getKeyValues()) { if (WALEdit.isMetaEditFamily(kv.getFamily())) continue; context.write(new ImmutableBytesWritable(kv.getRow()), kv); @@ -108,7 +109,8 @@ public class WALPlayer extends Configured implements Tool { */ static class HLogMapper extends Mapper { - private Map tables = new TreeMap(Bytes.BYTES_COMPARATOR); + private Map tables = + new TreeMap(); @Override public void map(HLogKey key, WALEdit value, @@ -116,10 +118,10 @@ public class WALPlayer extends Configured implements Tool { throws IOException { try { if (tables.isEmpty() || tables.containsKey(key.getTablename())) { - byte[] targetTable = tables.isEmpty() ? + TableName targetTable = tables.isEmpty() ? key.getTablename() : tables.get(key.getTablename()); - ImmutableBytesWritable tableOut = new ImmutableBytesWritable(targetTable); + ImmutableBytesWritable tableOut = new ImmutableBytesWritable(targetTable.getName()); Put put = null; Delete del = null; KeyValue lastKV = null; @@ -168,7 +170,8 @@ public class WALPlayer extends Configured implements Tool { } int i = 0; for (String table : tablesToUse) { - tables.put(Bytes.toBytes(table), Bytes.toBytes(tableMap[i++])); + tables.put(TableName.valueOf(table), + TableName.valueOf(tableMap[i++])); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index dc2c0e4..80f118a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -39,11 +39,15 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import com.google.common.base.Preconditions; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Chore; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.RegionTransition; @@ -53,7 +57,6 @@ import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaReader; import 
org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; @@ -89,7 +92,6 @@ import org.apache.zookeeper.KeeperException.NoNodeException; import org.apache.zookeeper.KeeperException.NodeExistsException; import org.apache.zookeeper.data.Stat; -import com.google.common.base.Preconditions; import com.google.common.collect.LinkedHashMultimap; /** @@ -355,7 +357,7 @@ public class AssignmentManager extends ZooKeeperListener { * @return Pair indicating the status of the alter command * @throws IOException */ - public Pair getReopenStatus(byte[] tableName) + public Pair getReopenStatus(TableName tableName) throws IOException { List hris = MetaReader.getTableRegions(this.server.getCatalogTracker(), tableName, true); @@ -450,7 +452,8 @@ public class AssignmentManager extends ZooKeeperListener { // its a clean cluster startup, else its a failover. Map regions = regionStates.getRegionAssignments(); for (Map.Entry e: regions.entrySet()) { - if (!e.getKey().isMetaTable() && e.getValue() != null) { + if (!HTableDescriptor.isSystemTable(e.getKey().getTableName()) + && e.getValue() != null) { LOG.debug("Found " + e + " out on cluster"); failover = true; break; @@ -1259,7 +1262,7 @@ public class AssignmentManager extends ZooKeeperListener { LOG.info("The master has opened " + regionNameStr + " that was online on " + serverName); boolean disabled = getZKTable().isDisablingOrDisabledTable( - regionInfo.getTableNameAsString()); + regionInfo.getTableName()); if (!serverManager.isServerOnline(serverName) && !disabled) { LOG.info("Opened " + regionNameStr + "but the region server is offline, reassign the region"); @@ -1863,7 +1866,7 @@ public class AssignmentManager extends ZooKeeperListener { // When we have a case such as all the regions are added directly into .META. and we call // assignRegion then we need to make the table ENABLED. Hence in such case the table // will not be in ENABLING or ENABLED state. - String tableName = region.getTableNameAsString(); + TableName tableName = region.getTableName(); if (!zkTable.isEnablingTable(tableName) && !zkTable.isEnabledTable(tableName)) { LOG.debug("Setting table " + tableName + " to ENABLED state."); setEnabledTable(tableName); @@ -2043,7 +2046,7 @@ public class AssignmentManager extends ZooKeeperListener { } private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) { - String tableName = region.getTableNameAsString(); + TableName tableName = region.getTableName(); boolean disabled = this.zkTable.isDisabledTable(tableName); if (disabled || this.zkTable.isDisablingTable(tableName)) { LOG.info("Table " + tableName + (disabled ? " disabled;" : " disabling;") + @@ -2520,10 +2523,10 @@ public class AssignmentManager extends ZooKeeperListener { // Skip assignment for regions of tables in DISABLING state because during clean cluster startup // no RS is alive and regions map also doesn't have any information about the regions. // See HBASE-6281. 
- Set disabledOrDisablingOrEnabling = ZKTable.getDisabledOrDisablingTables(watcher); + Set disabledOrDisablingOrEnabling = ZKTable.getDisabledOrDisablingTables(watcher); disabledOrDisablingOrEnabling.addAll(ZKTable.getEnablingTables(watcher)); // Scan META for all user regions, skipping any disabled tables - Map allRegions = null; + Map allRegions; if (this.shouldAssignRegionsWithFavoredNodes) { allRegions = FavoredNodeAssignmentHelper.fullScan( catalogTracker, disabledOrDisablingOrEnabling, true, (FavoredNodeLoadBalancer)balancer); @@ -2531,7 +2534,18 @@ public class AssignmentManager extends ZooKeeperListener { allRegions = MetaReader.fullScan( catalogTracker, disabledOrDisablingOrEnabling, true); } - if (allRegions == null || allRegions.isEmpty()) return; + + if (allRegions == null) return; + + //remove system tables because they would have been assigned earlier + for(Iterator iter = allRegions.keySet().iterator(); + iter.hasNext();) { + if (HTableDescriptor.isSystemTable(iter.next().getTableName())) { + iter.remove(); + } + } + + if (allRegions.isEmpty()) return; // Determine what type of assignment to do on startup boolean retainAssignment = server.getConfiguration(). @@ -2545,7 +2559,7 @@ public class AssignmentManager extends ZooKeeperListener { } for (HRegionInfo hri : allRegions.keySet()) { - String tableName = hri.getTableNameAsString(); + TableName tableName = hri.getTableName(); if (!zkTable.isEnabledTable(tableName)) { setEnabledTable(tableName); } @@ -2586,10 +2600,10 @@ public class AssignmentManager extends ZooKeeperListener { * @throws IOException */ Map> rebuildUserRegions() throws IOException, KeeperException { - Set enablingTables = ZKTable.getEnablingTables(watcher); - Set disabledOrEnablingTables = ZKTable.getDisabledTables(watcher); + Set enablingTables = ZKTable.getEnablingTables(watcher); + Set disabledOrEnablingTables = ZKTable.getDisabledTables(watcher); disabledOrEnablingTables.addAll(enablingTables); - Set disabledOrDisablingOrEnabling = ZKTable.getDisablingTables(watcher); + Set disabledOrDisablingOrEnabling = ZKTable.getDisablingTables(watcher); disabledOrDisablingOrEnabling.addAll(disabledOrEnablingTables); // Region assignment from META @@ -2607,7 +2621,7 @@ public class AssignmentManager extends ZooKeeperListener { ServerName regionLocation = region.getSecond(); if (regionInfo == null) continue; regionStates.createRegionState(regionInfo); - String tableName = regionInfo.getTableNameAsString(); + TableName tableName = regionInfo.getTableName(); if (regionLocation == null) { // regionLocation could be null if createTable didn't finish properly. // When createTable is in progress, HMaster restarts. @@ -2678,14 +2692,14 @@ public class AssignmentManager extends ZooKeeperListener { */ private void recoverTableInDisablingState() throws KeeperException, TableNotFoundException, IOException { - Set disablingTables = ZKTable.getDisablingTables(watcher); + Set disablingTables = ZKTable.getDisablingTables(watcher); if (disablingTables.size() != 0) { - for (String tableName : disablingTables) { + for (TableName tableName : disablingTables) { // Recover by calling DisableTableHandler LOG.info("The table " + tableName + " is in DISABLING state. 
Hence recovering by moving the table" + " to DISABLED state."); - new DisableTableHandler(this.server, tableName.getBytes(), catalogTracker, + new DisableTableHandler(this.server, tableName, catalogTracker, this, tableLockManager, true).prepare().process(); } } @@ -2701,16 +2715,16 @@ public class AssignmentManager extends ZooKeeperListener { */ private void recoverTableInEnablingState() throws KeeperException, TableNotFoundException, IOException { - Set enablingTables = ZKTable.getEnablingTables(watcher); + Set enablingTables = ZKTable.getEnablingTables(watcher); if (enablingTables.size() != 0) { - for (String tableName : enablingTables) { + for (TableName tableName : enablingTables) { // Recover by calling EnableTableHandler LOG.info("The table " + tableName + " is in ENABLING state. Hence recovering by moving the table" + " to ENABLED state."); // enableTable in sync way during master startup, // no need to invoke coprocessor - new EnableTableHandler(this.server, tableName.getBytes(), + new EnableTableHandler(this.server, tableName, catalogTracker, this, tableLockManager, true).prepare().process(); } } @@ -3109,8 +3123,7 @@ public class AssignmentManager extends ZooKeeperListener { } catch (KeeperException ke) { server.abort("Unexpected ZK exception deleting node " + hri, ke); } - - if (zkTable.isDisablingOrDisabledTable(hri.getTableNameAsString())) { + if (zkTable.isDisablingOrDisabledTable(hri.getTableName())) { it.remove(); regionStates.regionOffline(hri); continue; @@ -3143,7 +3156,7 @@ public class AssignmentManager extends ZooKeeperListener { // that case. This is not racing with the region server itself since RS // report is done after the split transaction completed. if (this.zkTable.isDisablingOrDisabledTable( - parent.getTableNameAsString())) { + parent.getTableName())) { unassign(a); unassign(b); } @@ -3166,7 +3179,7 @@ public class AssignmentManager extends ZooKeeperListener { // the master to disable, we need to make sure we close those regions in // that case. This is not racing with the region server itself since RS // report is done after the regions merge transaction completed. 
- if (this.zkTable.isDisablingOrDisabledTable(merged.getTableNameAsString())) { + if (this.zkTable.isDisablingOrDisabledTable(merged.getTableName())) { unassign(merged); } } @@ -3200,7 +3213,7 @@ public class AssignmentManager extends ZooKeeperListener { zkEventWorkers.shutdownNow(); } - protected void setEnabledTable(String tableName) { + protected void setEnabledTable(TableName tableName) { try { this.zkTable.setEnabledTable(tableName); } catch (KeeperException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java index f9d0734..27423f1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java @@ -33,6 +33,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Chore; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -46,6 +47,7 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.Triple; @@ -128,8 +130,8 @@ public class CatalogJanitor extends Chore { * @throws IOException */ Triple, Map> getMergedRegionsAndSplitParents( - final byte[] tableName) throws IOException { - final boolean isTableSpecified = (tableName != null && tableName.length != 0); + final TableName tableName) throws IOException { + final boolean isTableSpecified = (tableName != null); // TODO: Only works with single .META. region currently. Fix. final AtomicInteger count = new AtomicInteger(0); // Keep Map of found split parents. There are candidates for cleanup. @@ -147,7 +149,7 @@ public class CatalogJanitor extends Chore { HRegionInfo info = HRegionInfo.getHRegionInfo(r); if (info == null) return true; // Keep scanning if (isTableSpecified - && Bytes.compareTo(info.getTableName(), tableName) > 0) { + && info.getTableName().compareTo(tableName) > 0) { // Another table, stop scanning return false; } @@ -182,10 +184,9 @@ public class CatalogJanitor extends Chore { final HRegionInfo regionA, final HRegionInfo regionB) throws IOException { FileSystem fs = this.services.getMasterFileSystem().getFileSystem(); Path rootdir = this.services.getMasterFileSystem().getRootDir(); - Path tabledir = HTableDescriptor.getTableDir(rootdir, + Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTableName()); - HTableDescriptor htd = getTableDescriptor(mergedRegion - .getTableNameAsString()); + HTableDescriptor htd = getTableDescriptor(mergedRegion.getTableName()); HRegionFileSystem regionFs = null; try { regionFs = HRegionFileSystem.openRegionFromFileSystem( @@ -289,7 +290,7 @@ public class CatalogJanitor extends Chore { if (left == null) return -1; if (right == null) return 1; // Same table name. - int result = Bytes.compareTo(left.getTableName(), + int result = left.getTableName().compareTo( right.getTableName()); if (result != 0) return result; // Compare start keys. 
@@ -374,7 +375,7 @@ public class CatalogJanitor extends Chore { FileSystem fs = this.services.getMasterFileSystem().getFileSystem(); Path rootdir = this.services.getMasterFileSystem().getRootDir(); - Path tabledir = HTableDescriptor.getTableDir(rootdir, daughter.getTableName()); + Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTableName()); HRegionFileSystem regionFs = null; try { @@ -386,7 +387,7 @@ public class CatalogJanitor extends Chore { } boolean references = false; - HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableNameAsString()); + HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName()); for (HColumnDescriptor family: parentDescriptor.getFamilies()) { if ((references = regionFs.hasReferences(family.getNameAsString()))) { break; @@ -395,7 +396,7 @@ public class CatalogJanitor extends Chore { return new Pair(Boolean.TRUE, Boolean.valueOf(references)); } - private HTableDescriptor getTableDescriptor(final String tableName) + private HTableDescriptor getTableDescriptor(final TableName tableName) throws FileNotFoundException, IOException { return this.services.getTableDescriptors().get(tableName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 94e01c0..35aa0ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -28,6 +28,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -40,6 +41,8 @@ import java.util.concurrent.atomic.AtomicReference; import javax.management.ObjectName; +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -49,7 +52,11 @@ import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Chore; import org.apache.hadoop.hbase.ClusterId; import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -72,12 +79,7 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.exceptions.MergeRegionException; -import org.apache.hadoop.hbase.PleaseHoldException; -import org.apache.hadoop.hbase.TableNotDisabledException; -import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorType; @@ -109,6 +111,7 @@ import 
org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.ResponseConverter; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; @@ -206,6 +209,7 @@ import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.RegionServerTracker; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; +import org.apache.hadoop.hbase.zookeeper.ZKTable; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -274,6 +278,10 @@ MasterServices, Server { // Set back to false after we stop rpcServer. Used by tests. private volatile boolean rpcServerOpen = false; + /** Namespace stuff */ + private TableNamespaceManager tableNamespaceManager; + private NamespaceJanitor namespaceJanitorChore; + /** * This servers address. */ @@ -750,6 +758,7 @@ MasterServices, Server { */ status.setStatus("Initializing Master file system"); + this.masterActiveTime = System.currentTimeMillis(); // TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring. this.fileSystemManager = new MasterFileSystem(this, this, metricsMaster, masterRecovery); @@ -847,6 +856,10 @@ MasterServices, Server { this.fileSystemManager.splitMetaLog(previouslyFailedMetaRSs); } + status.setStatus("Assigning System tables"); + // Make sure system tables are assigned before proceeding. + assignSystemTables(status); + enableServerShutdownHandler(); status.setStatus("Submitting log splitting work for previously failed region servers"); @@ -876,7 +889,9 @@ MasterServices, Server { this.clusterStatusChore = getAndStartClusterStatusChore(this); this.balancerChore = getAndStartBalancerChore(this); this.catalogJanitorChore = new CatalogJanitor(this, this); + this.namespaceJanitorChore = new NamespaceJanitor(this); startCatalogJanitorChore(); + startNamespaceJanitorChore(); } status.markComplete("Initialization successful"); @@ -908,6 +923,14 @@ MasterServices, Server { } /** + * Useful for testing purpose also where we have + * master restart scenarios. + */ + protected void startNamespaceJanitorChore() { + Threads.setDaemonThreadRunning(namespaceJanitorChore.getThread()); + } + + /** * Create a {@link ServerManager} instance. * @param master * @param services @@ -980,9 +1003,9 @@ MasterServices, Server { this.catalogTracker.getMetaLocation()); } - enableCatalogTables(Bytes.toString(HConstants.META_TABLE_NAME)); - LOG.info(".META. assigned=" + assigned + ", rit=" + rit + ", location=" - + catalogTracker.getMetaLocation()); + enableMeta(TableName.META_TABLE_NAME); + LOG.info(".META. 
assigned=" + assigned + ", rit=" + rit + + ", location=" + catalogTracker.getMetaLocation()); status.setStatus("META assigned."); } @@ -998,6 +1021,82 @@ MasterServices, Server { } } + private void splitLogBeforeAssignment(ServerName currentServer, + Set regions) throws IOException { + if (this.distributedLogReplay) { + this.fileSystemManager.prepareLogReplay(currentServer, regions); + } else { + // In recovered.edits mode: create recovered edits file for region server + this.fileSystemManager.splitLog(currentServer); + } + } + + void assignSystemTables(MonitoredTask status) + throws InterruptedException, IOException, KeeperException { + // Skip assignment for regions of tables in DISABLING state because during clean cluster startup + // no RS is alive and regions map also doesn't have any information about the regions. + // See HBASE-6281. + Set disabledOrDisablingOrEnabling = ZKTable.getDisabledOrDisablingTables(zooKeeper); + disabledOrDisablingOrEnabling.addAll(ZKTable.getEnablingTables(zooKeeper)); + // Scan META for all system regions, skipping any disabled tables + Map allRegions = + MetaReader.fullScan(catalogTracker, disabledOrDisablingOrEnabling, true); + for(Iterator iter = allRegions.keySet().iterator(); + iter.hasNext();) { + if (!HTableDescriptor.isSystemTable(iter.next().getTableName())) { + iter.remove(); + } + } + + int assigned = 0; + boolean beingExpired = false; + + status.setStatus("Assigning System Regions"); + + for(Map.Entry entry: allRegions.entrySet()) { + HRegionInfo regionInfo = entry.getKey(); + ServerName currServer = entry.getValue(); + + assignmentManager.getRegionStates().createRegionState(regionInfo); + boolean rit = this.assignmentManager + .processRegionInTransitionAndBlockUntilAssigned(regionInfo); + boolean regionLocation = false; + if (currServer != null) { + regionLocation = verifyRegionLocation(currServer, regionInfo); + } + + if (!rit && !regionLocation) { + beingExpired = expireIfOnline(currServer); + if (beingExpired) { + splitLogBeforeAssignment(currServer, Sets.newHashSet(regionInfo)); + } + assignmentManager.assign(regionInfo, true); + // Make sure a region location is set. + this.assignmentManager.waitForAssignment(regionInfo); + assigned++; + if (beingExpired && this.distributedLogReplay) { + // In Replay WAL Mode, we need the new region server online + this.fileSystemManager.splitLog(currServer); + } + } else if (rit && !regionLocation) { + if (!waitVerifiedRegionLocation(regionInfo)) return; + assigned++; + } else { + // Region already assigned. We didn't assign it. Add to in-memory state. 
+ this.assignmentManager.regionOnline(regionInfo, currServer); + } + + if (!this.assignmentManager.getZKTable().isEnabledTable(regionInfo.getTableName())) { + this.assignmentManager.setEnabledTable(regionInfo.getTableName()); + } + LOG.info("System Regions assigned=" + assigned + ", rit=" + rit + + ", location=" + catalogTracker.getMetaLocation()); + } + status.setStatus("System Regions assigned."); + + initNamespace(); + } + private void enableSSHandWaitForMeta() throws IOException, InterruptedException { enableServerShutdownHandler(); this.catalogTracker.waitForMeta(); @@ -1006,9 +1105,31 @@ MasterServices, Server { this.assignmentManager.waitForAssignment(HRegionInfo.FIRST_META_REGIONINFO); } - private void enableCatalogTables(String catalogTableName) { - if (!this.assignmentManager.getZKTable().isEnabledTable(catalogTableName)) { - this.assignmentManager.setEnabledTable(catalogTableName); + private boolean waitVerifiedRegionLocation(HRegionInfo regionInfo) throws IOException { + while (!this.stopped) { + Pair p = MetaReader.getRegion(catalogTracker, + regionInfo.getRegionName()); + if (verifyRegionLocation(p.getSecond(), p.getFirst())) break; + } + // We got here because we came of above loop. + return !this.stopped; + } + + private boolean verifyRegionLocation(ServerName currServer, HRegionInfo regionInfo) { + try { + return + ProtobufUtil.getRegionInfo(HConnectionManager.getConnection(conf) + .getAdmin(currServer), + regionInfo.getRegionName()) != null; + } catch (IOException e) { + LOG.info("Failed to contact server: "+currServer, e); + } + return false; + } + + private void enableMeta(TableName metaTableName) { + if (!this.assignmentManager.getZKTable().isEnabledTable(metaTableName)) { + this.assignmentManager.setEnabledTable(metaTableName); } } @@ -1028,6 +1149,12 @@ MasterServices, Server { return true; } + void initNamespace() throws IOException { + //create namespace manager + tableNamespaceManager = new TableNamespaceManager(this); + tableNamespaceManager.start(); + } + /** * This function returns a set of region server names under .META. 
recovering region ZK node * @return Set of meta server names which were recorded in ZK @@ -1205,6 +1332,9 @@ MasterServices, Server { if (this.clusterStatusPublisherChore != null){ clusterStatusPublisherChore.interrupt(); } + if (this.namespaceJanitorChore != null){ + namespaceJanitorChore.interrupt(); + } } @Override @@ -1386,7 +1516,7 @@ MasterServices, Server { } } - Map>> assignmentsByTable = + Map>> assignmentsByTable = this.assignmentManager.getRegionStates().getAssignmentsByTable(); List plans = new ArrayList(); @@ -1645,13 +1775,18 @@ MasterServices, Server { throw new MasterNotRunningException(); } - HRegionInfo [] newRegions = getHRegionInfos(hTableDescriptor, splitKeys); + String namespace = hTableDescriptor.getTableName().getNamespaceAsString(); + if (getNamespaceDescriptor(namespace) == null) { + throw new ConstraintException("Namespace " + namespace + " does not exist"); + } + + HRegionInfo[] newRegions = getHRegionInfos(hTableDescriptor, splitKeys); checkInitialized(); checkCompression(hTableDescriptor); if (cpHost != null) { cpHost.preCreateTable(hTableDescriptor, newRegions); } - + this.executorService.submit(new CreateTableHandler(this, this.fileSystemManager, hTableDescriptor, conf, newRegions, this).prepare()); @@ -1694,7 +1829,7 @@ MasterServices, Server { HRegionInfo[] hRegionInfos = null; if (splitKeys == null || splitKeys.length == 0) { hRegionInfos = new HRegionInfo[]{ - new HRegionInfo(hTableDescriptor.getName(), null, null)}; + new HRegionInfo(hTableDescriptor.getTableName(), null, null)}; } else { int numRegions = splitKeys.length + 1; hRegionInfos = new HRegionInfo[numRegions]; @@ -1703,19 +1838,19 @@ MasterServices, Server { for (int i = 0; i < numRegions; i++) { endKey = (i == splitKeys.length) ? null : splitKeys[i]; hRegionInfos[i] = - new HRegionInfo(hTableDescriptor.getName(), startKey, endKey); + new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey); startKey = endKey; } } return hRegionInfos; } - private static boolean isCatalogTable(final byte [] tableName) { - return Bytes.equals(tableName, HConstants.META_TABLE_NAME); + private static boolean isCatalogTable(final TableName tableName) { + return tableName.equals(TableName.META_TABLE_NAME); } @Override - public void deleteTable(final byte[] tableName) throws IOException { + public void deleteTable(final TableName tableName) throws IOException { checkInitialized(); if (cpHost != null) { cpHost.preDeleteTable(tableName); @@ -1730,7 +1865,7 @@ MasterServices, Server { public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest request) throws ServiceException { try { - deleteTable(request.getTableName().toByteArray()); + deleteTable(ProtobufUtil.toTableName(request.getTableName())); } catch (IOException ioe) { throw new ServiceException(ioe); } @@ -1752,7 +1887,7 @@ MasterServices, Server { // may overlap with other table operations or the table operation may // have completed before querying this API. We need to refactor to a // transaction system in the future to avoid these ambiguities. 
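All of the master RPC entry points above first translate the protobuf table name on the wire into a TableName, and clients do the reverse when building requests. A compact sketch of both directions; toTableName appears in the patch, while toProtoTableName is assumed to be the matching helper on the request-building side:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class TableNamePbSketch {
      // Server side: protobuf -> TableName, as in deleteTable()/enableTable() above.
      static TableName fromProto(HBaseProtos.TableName pb) {
        return ProtobufUtil.toTableName(pb);
      }

      // Client side: TableName -> protobuf (toProtoTableName is assumed to be the
      // counterpart helper; it is not shown in this hunk).
      static HBaseProtos.TableName toProto(TableName tn) {
        return ProtobufUtil.toProtoTableName(tn);
      }
    }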
- byte [] tableName = req.getTableName().toByteArray(); + TableName tableName = ProtobufUtil.toTableName(req.getTableName()); try { Pair pair = this.assignmentManager.getReopenStatus(tableName); @@ -1766,7 +1901,7 @@ MasterServices, Server { } @Override - public void addColumn(final byte[] tableName, final HColumnDescriptor column) + public void addColumn(final TableName tableName, final HColumnDescriptor column) throws IOException { checkInitialized(); if (cpHost != null) { @@ -1786,7 +1921,7 @@ MasterServices, Server { public AddColumnResponse addColumn(RpcController controller, AddColumnRequest req) throws ServiceException { try { - addColumn(req.getTableName().toByteArray(), + addColumn(ProtobufUtil.toTableName(req.getTableName()), HColumnDescriptor.convert(req.getColumnFamilies())); } catch (IOException ioe) { throw new ServiceException(ioe); @@ -1795,7 +1930,7 @@ MasterServices, Server { } @Override - public void modifyColumn(byte[] tableName, HColumnDescriptor descriptor) + public void modifyColumn(TableName tableName, HColumnDescriptor descriptor) throws IOException { checkInitialized(); checkCompression(descriptor); @@ -1815,7 +1950,7 @@ MasterServices, Server { public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest req) throws ServiceException { try { - modifyColumn(req.getTableName().toByteArray(), + modifyColumn(ProtobufUtil.toTableName(req.getTableName()), HColumnDescriptor.convert(req.getColumnFamilies())); } catch (IOException ioe) { throw new ServiceException(ioe); @@ -1824,7 +1959,7 @@ MasterServices, Server { } @Override - public void deleteColumn(final byte[] tableName, final byte[] columnName) + public void deleteColumn(final TableName tableName, final byte[] columnName) throws IOException { checkInitialized(); if (cpHost != null) { @@ -1842,7 +1977,8 @@ MasterServices, Server { public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest req) throws ServiceException { try { - deleteColumn(req.getTableName().toByteArray(), req.getColumnName().toByteArray()); + deleteColumn(ProtobufUtil.toTableName(req.getTableName()), + req.getColumnName().toByteArray()); } catch (IOException ioe) { throw new ServiceException(ioe); } @@ -1850,7 +1986,7 @@ MasterServices, Server { } @Override - public void enableTable(final byte[] tableName) throws IOException { + public void enableTable(final TableName tableName) throws IOException { checkInitialized(); if (cpHost != null) { cpHost.preEnableTable(tableName); @@ -1866,7 +2002,7 @@ MasterServices, Server { public EnableTableResponse enableTable(RpcController controller, EnableTableRequest request) throws ServiceException { try { - enableTable(request.getTableName().toByteArray()); + enableTable(ProtobufUtil.toTableName(request.getTableName())); } catch (IOException ioe) { throw new ServiceException(ioe); } @@ -1874,7 +2010,7 @@ MasterServices, Server { } @Override - public void disableTable(final byte[] tableName) throws IOException { + public void disableTable(final TableName tableName) throws IOException { checkInitialized(); if (cpHost != null) { cpHost.preDisableTable(tableName); @@ -1890,7 +2026,7 @@ MasterServices, Server { public DisableTableResponse disableTable(RpcController controller, DisableTableRequest request) throws ServiceException { try { - disableTable(request.getTableName().toByteArray()); + disableTable(ProtobufUtil.toTableName(request.getTableName())); } catch (IOException ioe) { throw new ServiceException(ioe); } @@ -1904,7 +2040,7 @@ MasterServices, Server { 
* may be null. */ Pair getTableRegionForRow( - final byte [] tableName, final byte [] rowKey) + final TableName tableName, final byte [] rowKey) throws IOException { final AtomicReference> result = new AtomicReference>(null); @@ -1920,7 +2056,7 @@ MasterServices, Server { if (pair == null) { return false; } - if (!Bytes.equals(pair.getFirst().getTableName(), tableName)) { + if (!pair.getFirst().getTableName().equals(tableName)) { return false; } result.set(pair); @@ -1933,7 +2069,7 @@ MasterServices, Server { } @Override - public void modifyTable(final byte[] tableName, final HTableDescriptor descriptor) + public void modifyTable(final TableName tableName, final HTableDescriptor descriptor) throws IOException { checkInitialized(); checkCompression(descriptor); @@ -1950,7 +2086,7 @@ MasterServices, Server { public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest req) throws ServiceException { try { - modifyTable(req.getTableName().toByteArray(), + modifyTable(ProtobufUtil.toTableName(req.getTableName()), HTableDescriptor.convert(req.getTableSchema())); } catch (IOException ioe) { throw new ServiceException(ioe); @@ -1959,17 +2095,16 @@ MasterServices, Server { } @Override - public void checkTableModifiable(final byte [] tableName) + public void checkTableModifiable(final TableName tableName) throws IOException, TableNotFoundException, TableNotDisabledException { - String tableNameStr = Bytes.toString(tableName); if (isCatalogTable(tableName)) { throw new IOException("Can't modify catalog tables"); } - if (!MetaReader.tableExists(getCatalogTracker(), tableNameStr)) { - throw new TableNotFoundException(tableNameStr); + if (!MetaReader.tableExists(getCatalogTracker(), tableName)) { + throw new TableNotFoundException(tableName); } if (!getAssignmentManager().getZKTable(). 
- isDisabledTable(Bytes.toString(tableName))) { + isDisabledTable(tableName)) { throw new TableNotDisabledException(tableName); } } @@ -2436,11 +2571,14 @@ MasterServices, Server { public GetTableDescriptorsResponse getTableDescriptors( RpcController controller, GetTableDescriptorsRequest req) throws ServiceException { List descriptors = new ArrayList(); - + List tableNameList = new ArrayList(); + for(HBaseProtos.TableName tableNamePB: req.getTableNamesList()) { + tableNameList.add(ProtobufUtil.toTableName(tableNamePB)); + } boolean bypass = false; if (this.cpHost != null) { try { - bypass = this.cpHost.preGetTableDescriptors(req.getTableNamesList(), descriptors); + bypass = this.cpHost.preGetTableDescriptors(tableNameList, descriptors); } catch (IOException ioe) { throw new ServiceException(ioe); } @@ -2456,10 +2594,14 @@ MasterServices, Server { LOG.warn("Failed getting all descriptors", e); } if (descriptorMap != null) { - descriptors.addAll(descriptorMap.values()); + for(HTableDescriptor desc: descriptorMap.values()) { + if(!HTableDescriptor.isSystemTable(desc.getTableName())) { + descriptors.add(desc); + } + } } } else { - for (String s: req.getTableNamesList()) { + for (TableName s: tableNameList) { try { HTableDescriptor desc = this.tableDescriptors.get(s); if (desc != null) { @@ -2809,9 +2951,136 @@ MasterServices, Server { } } + @Override + public MasterAdminProtos.ModifyNamespaceResponse modifyNamespace(RpcController controller, + MasterAdminProtos.ModifyNamespaceRequest request) throws ServiceException { + try { + modifyNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor())); + return MasterAdminProtos.ModifyNamespaceResponse.getDefaultInstance(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public MasterAdminProtos.CreateNamespaceResponse createNamespace(RpcController controller, + MasterAdminProtos.CreateNamespaceRequest request) throws ServiceException { + try { + createNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor())); + return MasterAdminProtos.CreateNamespaceResponse.getDefaultInstance(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public MasterAdminProtos.DeleteNamespaceResponse deleteNamespace(RpcController controller, MasterAdminProtos.DeleteNamespaceRequest request) throws ServiceException { + try { + deleteNamespace(request.getNamespaceName()); + return MasterAdminProtos.DeleteNamespaceResponse.getDefaultInstance(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public MasterAdminProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor( + RpcController controller, MasterAdminProtos.GetNamespaceDescriptorRequest request) + throws ServiceException { + try { + return MasterAdminProtos.GetNamespaceDescriptorResponse.newBuilder() + .setNamespaceDescriptor( + ProtobufUtil.toProtoNamespaceDescriptor(getNamespaceDescriptor(request.getNamespaceName()))) + .build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public MasterAdminProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors( + RpcController controller, MasterAdminProtos.ListNamespaceDescriptorsRequest request) + throws ServiceException { + try { + MasterAdminProtos.ListNamespaceDescriptorsResponse.Builder response = + MasterAdminProtos.ListNamespaceDescriptorsResponse.newBuilder(); + for(NamespaceDescriptor ns: listNamespaceDescriptors()) { + 
response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns)); + } + return response.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override + public MasterAdminProtos.GetTableDescriptorsByNamespaceResponse getTableDescriptorsByNamespace( + RpcController controller, MasterAdminProtos.GetTableDescriptorsByNamespaceRequest request) + throws ServiceException { + try { + MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.Builder b = + MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.newBuilder(); + for(HTableDescriptor htd: getTableDescriptorsByNamespace(request.getNamespaceName())) { + b.addTableSchema(htd.convert()); + } + return b.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + private boolean isHealthCheckerConfigured() { String healthScriptLocation = this.conf.get(HConstants.HEALTH_SCRIPT_LOC); return org.apache.commons.lang.StringUtils.isNotBlank(healthScriptLocation); } + public void createNamespace(NamespaceDescriptor descriptor) throws IOException { + TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName())); + if (cpHost != null) { + if (cpHost.preCreateNamespace(descriptor)) { + return; + } + } + tableNamespaceManager.create(descriptor); + if (cpHost != null) { + cpHost.postCreateNamespace(descriptor); + } + } + + public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException { + TableName.isLegalNamespaceName(Bytes.toBytes(descriptor.getName())); + if (cpHost != null) { + if (cpHost.preModifyNamespace(descriptor)) { + return; + } + } + tableNamespaceManager.update(descriptor); + if (cpHost != null) { + cpHost.postModifyNamespace(descriptor); + } + } + + public void deleteNamespace(String name) throws IOException { + if (cpHost != null) { + if (cpHost.preDeleteNamespace(name)) { + return; + } + } + tableNamespaceManager.remove(name); + if (cpHost != null) { + cpHost.postDeleteNamespace(name); + } + } + + public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException { + return tableNamespaceManager.get(name); + } + + public List listNamespaceDescriptors() throws IOException { + return Lists.newArrayList(tableNamespaceManager.list()); + } + + public List getTableDescriptorsByNamespace(String name) throws IOException { + return Lists.newArrayList(tableDescriptors.getByNamespace(name).values()); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 8769be8..ddeb9c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -87,6 +87,124 @@ public class MasterCoprocessorHost abortServer("master", masterServices, env, e); } + boolean preCreateNamespace(NamespaceDescriptor ns) + throws IOException { + boolean bypass = false; + ObserverContext ctx = null; + for (MasterEnvironment env: coprocessors) { + if (env.getInstance() instanceof MasterObserver) { + ctx = ObserverContext.createAndPrepare(env, ctx); + try { + ((MasterObserver)env.getInstance()).preCreateNamespace( + ctx, ns); + } catch (Throwable e) { + handleCoprocessorThrowable(env, e); + } + bypass |= ctx.shouldBypass(); + if (ctx.shouldComplete()) { + break; + } + } + } + return bypass; + } + + void postCreateNamespace(NamespaceDescriptor ns) + throws IOException { + ObserverContext ctx = null; + for (MasterEnvironment 
env: coprocessors) { + if (env.getInstance() instanceof MasterObserver) { + ctx = ObserverContext.createAndPrepare(env, ctx); + try { + ((MasterObserver)env.getInstance()).postCreateNamespace(ctx, ns); + } catch (Throwable e) { + handleCoprocessorThrowable(env, e); + } + if (ctx.shouldComplete()) { + break; + } + } + } + } + + boolean preDeleteNamespace(String namespaceName) throws IOException { + boolean bypass = false; + ObserverContext ctx = null; + for (MasterEnvironment env: coprocessors) { + if (env.getInstance() instanceof MasterObserver) { + ctx = ObserverContext.createAndPrepare(env, ctx); + try { + ((MasterObserver)env.getInstance()).preDeleteNamespace( + ctx, namespaceName); + } catch (Throwable e) { + handleCoprocessorThrowable(env, e); + } + bypass |= ctx.shouldBypass(); + if (ctx.shouldComplete()) { + break; + } + } + } + return bypass; + } + + void postDeleteNamespace(String namespaceName) throws IOException { + ObserverContext ctx = null; + for (MasterEnvironment env: coprocessors) { + if (env.getInstance() instanceof MasterObserver) { + ctx = ObserverContext.createAndPrepare(env, ctx); + try { + ((MasterObserver)env.getInstance()).postDeleteNamespace(ctx, namespaceName); + } catch (Throwable e) { + handleCoprocessorThrowable(env, e); + } + if (ctx.shouldComplete()) { + break; + } + } + } + } + + boolean preModifyNamespace(NamespaceDescriptor ns) + throws IOException { + boolean bypass = false; + ObserverContext ctx = null; + for (MasterEnvironment env: coprocessors) { + if (env.getInstance() instanceof MasterObserver) { + ctx = ObserverContext.createAndPrepare(env, ctx); + try { + ((MasterObserver)env.getInstance()).preModifyNamespace( + ctx, ns); + } catch (Throwable e) { + handleCoprocessorThrowable(env, e); + } + bypass |= ctx.shouldBypass(); + if (ctx.shouldComplete()) { + break; + } + } + } + return bypass; + } + + void postModifyNamespace(NamespaceDescriptor ns) + throws IOException { + ObserverContext ctx = null; + for (MasterEnvironment env: coprocessors) { + if (env.getInstance() instanceof MasterObserver) { + ctx = ObserverContext.createAndPrepare(env, ctx); + try { + ((MasterObserver)env.getInstance()).postModifyNamespace(ctx, ns); + } catch (Throwable e) { + handleCoprocessorThrowable(env, e); + } + if (ctx.shouldComplete()) { + break; + } + } + } + } + /* Implementation of hooks for invoking MasterObservers */ public void preCreateTable(HTableDescriptor htd, HRegionInfo[] regions) throws IOException { @@ -162,7 +280,7 @@ public class MasterCoprocessorHost } } - public void preDeleteTable(byte[] tableName) throws IOException { + public void preDeleteTable(TableName tableName) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env: coprocessors) { if (env.getInstance() instanceof MasterObserver) { @@ -179,7 +297,7 @@ public class MasterCoprocessorHost } } - public void postDeleteTable(byte[] tableName) throws IOException { + public void postDeleteTable(TableName tableName) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env: coprocessors) { if (env.getInstance() instanceof MasterObserver) { @@ -196,7 +314,7 @@ public class MasterCoprocessorHost } } - public void preDeleteTableHandler(byte[] tableName) throws IOException { + public void preDeleteTableHandler(TableName tableName) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env : coprocessors) { if (env.getInstance() instanceof MasterObserver) { @@ -214,7 +332,7 @@ public class MasterCoprocessorHost } } - public void 
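These pre/post hooks give MasterObserver coprocessors a veto point on namespace DDL. A minimal observer sketch follows; it assumes BaseMasterObserver picks up no-op implementations of the new namespace hooks as part of this change, and the naming policy itself is made up:

import java.io.IOException;

import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class NamespacePolicyObserver extends BaseMasterObserver {
  @Override
  public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
      NamespaceDescriptor ns) throws IOException {
    // Reject namespaces matching a (made-up) reserved prefix.
    if (ns.getName().startsWith("tmp_")) {
      throw new ConstraintException("Namespaces starting with tmp_ are reserved");
    }
  }
}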
postDeleteTableHandler(byte[] tableName) throws IOException { + public void postDeleteTableHandler(TableName tableName) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env : coprocessors) { if (env.getInstance() instanceof MasterObserver) { @@ -231,7 +349,7 @@ public class MasterCoprocessorHost } } } - public void preModifyTable(final byte[] tableName, HTableDescriptor htd) + public void preModifyTable(final TableName tableName, HTableDescriptor htd) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env: coprocessors) { @@ -250,7 +368,7 @@ public class MasterCoprocessorHost } } - public void postModifyTable(final byte[] tableName, HTableDescriptor htd) + public void postModifyTable(final TableName tableName, HTableDescriptor htd) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env: coprocessors) { @@ -269,7 +387,7 @@ public class MasterCoprocessorHost } } - public void preModifyTableHandler(final byte[] tableName, HTableDescriptor htd) + public void preModifyTableHandler(final TableName tableName, HTableDescriptor htd) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env : coprocessors) { @@ -288,7 +406,7 @@ public class MasterCoprocessorHost } } - public void postModifyTableHandler(final byte[] tableName, + public void postModifyTableHandler(final TableName tableName, HTableDescriptor htd) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env : coprocessors) { @@ -307,7 +425,7 @@ public class MasterCoprocessorHost } } - public boolean preAddColumn(byte [] tableName, HColumnDescriptor column) + public boolean preAddColumn(TableName tableName, HColumnDescriptor column) throws IOException { boolean bypass = false; ObserverContext ctx = null; @@ -328,7 +446,7 @@ public class MasterCoprocessorHost return bypass; } - public void postAddColumn(byte [] tableName, HColumnDescriptor column) + public void postAddColumn(TableName tableName, HColumnDescriptor column) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env: coprocessors) { @@ -347,7 +465,7 @@ public class MasterCoprocessorHost } } - public boolean preAddColumnHandler(byte[] tableName, HColumnDescriptor column) + public boolean preAddColumnHandler(TableName tableName, HColumnDescriptor column) throws IOException { boolean bypass = false; ObserverContext ctx = null; @@ -369,7 +487,7 @@ public class MasterCoprocessorHost return bypass; } - public void postAddColumnHandler(byte[] tableName, HColumnDescriptor column) + public void postAddColumnHandler(TableName tableName, HColumnDescriptor column) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env : coprocessors) { @@ -388,7 +506,7 @@ public class MasterCoprocessorHost } } - public boolean preModifyColumn(byte [] tableName, HColumnDescriptor descriptor) + public boolean preModifyColumn(TableName tableName, HColumnDescriptor descriptor) throws IOException { boolean bypass = false; ObserverContext ctx = null; @@ -410,7 +528,7 @@ public class MasterCoprocessorHost return bypass; } - public void postModifyColumn(byte [] tableName, HColumnDescriptor descriptor) + public void postModifyColumn(TableName tableName, HColumnDescriptor descriptor) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env: coprocessors) { @@ -429,7 +547,7 @@ public class MasterCoprocessorHost } } - public boolean preModifyColumnHandler(byte[] tableName, + public boolean preModifyColumnHandler(TableName tableName, HColumnDescriptor descriptor) 
throws IOException { boolean bypass = false; ObserverContext ctx = null; @@ -451,7 +569,7 @@ public class MasterCoprocessorHost return bypass; } - public void postModifyColumnHandler(byte[] tableName, + public void postModifyColumnHandler(TableName tableName, HColumnDescriptor descriptor) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env : coprocessors) { @@ -470,7 +588,7 @@ public class MasterCoprocessorHost } } - boolean preDeleteColumn(final byte [] tableName, final byte [] c) + boolean preDeleteColumn(final TableName tableName, final byte [] c) throws IOException { boolean bypass = false; ObserverContext ctx = null; @@ -491,7 +609,7 @@ public class MasterCoprocessorHost return bypass; } - public void postDeleteColumn(final byte [] tableName, final byte [] c) + public void postDeleteColumn(final TableName tableName, final byte [] c) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env: coprocessors) { @@ -510,7 +628,7 @@ public class MasterCoprocessorHost } } - public boolean preDeleteColumnHandler(final byte[] tableName, final byte[] c) + public boolean preDeleteColumnHandler(final TableName tableName, final byte[] c) throws IOException { boolean bypass = false; ObserverContext ctx = null; @@ -532,7 +650,7 @@ public class MasterCoprocessorHost return bypass; } - public void postDeleteColumnHandler(final byte[] tableName, final byte[] c) + public void postDeleteColumnHandler(final TableName tableName, final byte[] c) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env : coprocessors) { @@ -551,7 +669,7 @@ public class MasterCoprocessorHost } } - public void preEnableTable(final byte [] tableName) throws IOException { + public void preEnableTable(final TableName tableName) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env: coprocessors) { if (env.getInstance() instanceof MasterObserver) { @@ -568,7 +686,7 @@ public class MasterCoprocessorHost } } - public void postEnableTable(final byte [] tableName) throws IOException { + public void postEnableTable(final TableName tableName) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env: coprocessors) { if (env.getInstance() instanceof MasterObserver) { @@ -585,7 +703,7 @@ public class MasterCoprocessorHost } } - public void preEnableTableHandler(final byte[] tableName) throws IOException { + public void preEnableTableHandler(final TableName tableName) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env : coprocessors) { if (env.getInstance() instanceof MasterObserver) { @@ -603,7 +721,7 @@ public class MasterCoprocessorHost } } - public void postEnableTableHandler(final byte[] tableName) throws IOException { + public void postEnableTableHandler(final TableName tableName) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env : coprocessors) { if (env.getInstance() instanceof MasterObserver) { @@ -621,7 +739,7 @@ public class MasterCoprocessorHost } } - public void preDisableTable(final byte [] tableName) throws IOException { + public void preDisableTable(final TableName tableName) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env: coprocessors) { if (env.getInstance() instanceof MasterObserver) { @@ -638,7 +756,7 @@ public class MasterCoprocessorHost } } - public void postDisableTable(final byte [] tableName) throws IOException { + public void postDisableTable(final TableName tableName) throws IOException { ObserverContext ctx = null; for 
(MasterEnvironment env: coprocessors) { if (env.getInstance() instanceof MasterObserver) { @@ -655,7 +773,7 @@ public class MasterCoprocessorHost } } - public void preDisableTableHandler(final byte[] tableName) throws IOException { + public void preDisableTableHandler(final TableName tableName) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env : coprocessors) { if (env.getInstance() instanceof MasterObserver) { @@ -673,7 +791,7 @@ public class MasterCoprocessorHost } } - public void postDisableTableHandler(final byte[] tableName) + public void postDisableTableHandler(final TableName tableName) throws IOException { ObserverContext ctx = null; for (MasterEnvironment env : coprocessors) { @@ -1114,7 +1232,7 @@ public class MasterCoprocessorHost } } - public boolean preGetTableDescriptors(final List tableNamesList, + public boolean preGetTableDescriptors(final List tableNamesList, final List descriptors) throws IOException { boolean bypass = false; ObserverContext ctx = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index b0a0c25..fcc99ca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.ClusterId; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -529,41 +530,32 @@ public class MasterFileSystem { HFileArchiver.archiveRegion(conf, fs, region); } - public void deleteTable(byte[] tableName) throws IOException { - fs.delete(new Path(rootdir, Bytes.toString(tableName)), true); + public void deleteTable(TableName tableName) throws IOException { + fs.delete(FSUtils.getTableDir(rootdir, tableName), true); } /** - * Move the specified file/directory to the hbase temp directory. 
- * @param path The path of the file/directory to move - * @return The temp location of the file/directory moved + * Move the specified table to the hbase temp directory + * @param tableName Table name to move + * @return The temp location of the table moved * @throws IOException in case of file-system failure */ - public Path moveToTemp(final Path path) throws IOException { - Path tempPath = new Path(this.tempdir, path.getName()); + public Path moveTableToTemp(TableName tableName) throws IOException { + Path srcPath = FSUtils.getTableDir(rootdir, tableName); + Path tempPath = FSUtils.getTableDir(this.tempdir, tableName); // Ensure temp exists - if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) { - throw new IOException("HBase temp directory '" + tempdir + "' creation failure."); + if (!fs.exists(tempPath.getParent()) && !fs.mkdirs(tempPath.getParent())) { + throw new IOException("HBase temp directory '" + tempPath.getParent() + "' creation failure."); } - if (!fs.rename(path, tempPath)) { - throw new IOException("Unable to move '" + path + "' to temp '" + tempPath + "'"); + if (!fs.rename(srcPath, tempPath)) { + throw new IOException("Unable to move '" + srcPath + "' to temp '" + tempPath + "'"); } return tempPath; } - /** - * Move the specified table to the hbase temp directory - * @param tableName Table name to move - * @return The temp location of the table moved - * @throws IOException in case of file-system failure - */ - public Path moveTableToTemp(byte[] tableName) throws IOException { - return moveToTemp(HTableDescriptor.getTableDir(this.rootdir, tableName)); - } - public void updateRegionInfo(HRegionInfo region) { // TODO implement this. i think this is currently broken in trunk i don't // see this getting updated. @@ -573,7 +565,7 @@ public class MasterFileSystem { public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName) throws IOException { // archive family store files - Path tableDir = new Path(rootdir, region.getTableNameAsString()); + Path tableDir = FSUtils.getTableDir(rootdir, region.getTableName()); HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName); // delete the family folder @@ -600,9 +592,9 @@ public class MasterFileSystem { * @return Modified HTableDescriptor with requested column deleted. * @throws IOException */ - public HTableDescriptor deleteColumn(byte[] tableName, byte[] familyName) + public HTableDescriptor deleteColumn(TableName tableName, byte[] familyName) throws IOException { - LOG.info("DeleteColumn. Table = " + Bytes.toString(tableName) + LOG.info("DeleteColumn. Table = " + tableName + " family = " + Bytes.toString(familyName)); HTableDescriptor htd = this.services.getTableDescriptors().get(tableName); htd.removeFamily(familyName); @@ -617,9 +609,9 @@ public class MasterFileSystem { * @return Modified HTableDescriptor with the column modified. * @throws IOException */ - public HTableDescriptor modifyColumn(byte[] tableName, HColumnDescriptor hcd) + public HTableDescriptor modifyColumn(TableName tableName, HColumnDescriptor hcd) throws IOException { - LOG.info("AddModifyColumn. Table = " + Bytes.toString(tableName) + LOG.info("AddModifyColumn. Table = " + tableName + " HCD = " + hcd.toString()); HTableDescriptor htd = this.services.getTableDescriptors().get(tableName); @@ -640,9 +632,9 @@ public class MasterFileSystem { * @return Modified HTableDescriptor with new column added. 
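With namespaces, MasterFileSystem derives every table path through FSUtils.getTableDir(rootdir, tableName) instead of appending the raw table name to the root directory, so the on-disk location follows the table's namespace. A small sketch of that lookup (the table name is made up):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSUtils;

public final class TableDirLookup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);

    // The table directory is derived from the TableName (and therefore from
    // its namespace) instead of appending a raw string to the root dir.
    TableName tn = TableName.valueOf("demo_table");  // made-up table name
    Path tableDir = FSUtils.getTableDir(rootDir, tn);
    System.out.println(tableDir);
  }
}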
* @throws IOException */ - public HTableDescriptor addColumn(byte[] tableName, HColumnDescriptor hcd) + public HTableDescriptor addColumn(TableName tableName, HColumnDescriptor hcd) throws IOException { - LOG.info("AddColumn. Table = " + Bytes.toString(tableName) + " HCD = " + + LOG.info("AddColumn. Table = " + tableName + " HCD = " + hcd.toString()); HTableDescriptor htd = this.services.getTableDescriptors().get(tableName); if (htd == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 5f7c2ac..a56433a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -19,11 +19,14 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; +import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableNotDisabledException; @@ -75,7 +78,7 @@ public interface MasterServices extends Server { * @throws IOException */ // We actually throw the exceptions mentioned in the - void checkTableModifiable(final byte[] tableName) + void checkTableModifiable(final TableName tableName) throws IOException, TableNotFoundException, TableNotDisabledException; /** @@ -92,7 +95,7 @@ public interface MasterServices extends Server { * @param tableName The table name * @throws IOException */ - void deleteTable(final byte[] tableName) throws IOException; + void deleteTable(final TableName tableName) throws IOException; /** * Modify the descriptor of an existing table @@ -100,7 +103,7 @@ public interface MasterServices extends Server { * @param descriptor The updated table descriptor * @throws IOException */ - void modifyTable(final byte[] tableName, final HTableDescriptor descriptor) + void modifyTable(final TableName tableName, final HTableDescriptor descriptor) throws IOException; /** @@ -108,14 +111,15 @@ public interface MasterServices extends Server { * @param tableName The table name * @throws IOException */ - void enableTable(final byte[] tableName) throws IOException; + void enableTable(final TableName tableName) throws IOException; /** * Disable an existing table * @param tableName The table name * @throws IOException */ - void disableTable(final byte[] tableName) throws IOException; + void disableTable(final TableName tableName) throws IOException; + /** * Add a new column to an existing table @@ -123,7 +127,7 @@ public interface MasterServices extends Server { * @param column The column definition * @throws IOException */ - void addColumn(final byte[] tableName, final HColumnDescriptor column) + void addColumn(final TableName tableName, final HColumnDescriptor column) throws IOException; /** @@ -132,7 +136,7 @@ public interface MasterServices extends Server { * @param descriptor The updated column definition * @throws IOException */ - void modifyColumn(byte[] tableName, HColumnDescriptor descriptor) + void modifyColumn(TableName tableName, HColumnDescriptor descriptor) throws IOException; /** @@ -141,7 +145,7 @@ public interface MasterServices extends Server { * @param 
columnName The column name * @throws IOException */ - void deleteColumn(final byte[] tableName, final byte[] columnName) + void deleteColumn(final TableName tableName, final byte[] columnName) throws IOException; /** @@ -187,4 +191,47 @@ public interface MasterServices extends Server { */ boolean isInitialized(); + /** + * Create a new namespace + * @param descriptor descriptor which describes the new namespace + * @throws IOException + */ + public void createNamespace(NamespaceDescriptor descriptor) throws IOException; + + /** + * Modify an existing namespace + * @param descriptor descriptor which updates the existing namespace + * @throws IOException + */ + public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException; + + /** + * Delete an existing namespace. Only empty namespaces (no tables) can be removed. + * @param name namespace name + * @throws IOException + */ + public void deleteNamespace(String name) throws IOException; + + /** + * Get a namespace descriptor by name + * @param name name of namespace descriptor + * @return + * @throws IOException + */ + public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException; + + /** + * List available namespace descriptors + * @return + * @throws IOException + */ + public List listNamespaceDescriptors() throws IOException; + + /** + * Get list of table descriptors by namespace + * @param name namespace name + * @return + * @throws IOException + */ + public List getTableDescriptorsByNamespace(String name) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/NamespaceJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/NamespaceJanitor.java new file mode 100644 index 0000000..6b4bc5d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/NamespaceJanitor.java @@ -0,0 +1,150 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import com.google.common.collect.Sets; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Chore; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +import java.io.IOException; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * A janitor for the namespace artifacts. 
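MasterServices now exposes the namespace lifecycle directly. A hedged sketch of idempotent namespace creation from master-side code; getNamespaceDescriptor returns null for an unknown namespace in this patch, while the namespace name and the NamespaceDescriptor.create(...).build() builder are assumptions for illustration:

import java.io.IOException;

import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;

public final class EnsureNamespace {
  // Creates the namespace only if it does not exist yet. The builder call
  // NamespaceDescriptor.create(name).build() is an assumed API.
  static void ensureNamespace(MasterServices master, String name) throws IOException {
    if (master.getNamespaceDescriptor(name) == null) {
      master.createNamespace(NamespaceDescriptor.create(name).build());
    }
  }
}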
+ * Traverses hdfs and zk to remove orphaned directories/znodes + */ +@InterfaceAudience.Private +public class NamespaceJanitor extends Chore { + private static final Log LOG = LogFactory.getLog(NamespaceJanitor.class.getName()); + private final MasterServices services; + private AtomicBoolean enabled = new AtomicBoolean(true); + private AtomicBoolean alreadyRunning = new AtomicBoolean(false); + + public NamespaceJanitor(final MasterServices services) { + super("NamespaceJanitor-" + services.getServerName().toShortString(), + services.getConfiguration().getInt("hbase.namespacejanitor.interval", 30000), + services); + this.services = services; + } + + @Override + protected boolean initialChore() { + try { + if (this.enabled.get()) removeOrphans(); + } catch (IOException e) { + LOG.warn("Failed NamespaceJanitor chore", e); + return false; + } catch (KeeperException e) { + LOG.warn("Failed NamespaceJanitor chore", e); + return false; + } + return true; + } + + /** + * @param enabled + */ + public boolean setEnabled(final boolean enabled) { + return this.enabled.getAndSet(enabled); + } + + boolean getEnabled() { + return this.enabled.get(); + } + + @Override + protected void chore() { + try { + if (this.enabled.get()) { + removeOrphans(); + } else { + LOG.warn("NamepsaceJanitor disabled! Not running scan."); + } + } catch (IOException e) { + LOG.warn("Failed NamespaceJanitor chore", e); + } catch (KeeperException e) { + LOG.warn("Failed NamespaceJanitor chore", e); + } + } + + private void removeOrphans() throws IOException, KeeperException { + //cache the info so we don't need to keep the master nsLock for long + //and not be wasteful with rpc calls + FileSystem fs = services.getMasterFileSystem().getFileSystem(); + Set descs = Sets.newHashSet(); + for(NamespaceDescriptor ns : services.listNamespaceDescriptors()) { + descs.add(ns.getName()); + } + + //cleanup hdfs orphans + for (FileStatus nsStatus : FSUtils.listStatus(fs, + new Path(FSUtils.getRootDir(services.getConfiguration()), HConstants.BASE_NAMESPACE_DIR))) { + if (!descs.contains(nsStatus.getPath().getName()) && + !NamespaceDescriptor.RESERVED_NAMESPACES.contains(nsStatus.getPath().getName())) { + boolean isEmpty = true; + for(FileStatus status : fs.listStatus(nsStatus.getPath())) { + if (!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) { + isEmpty = false; + break; + } + } + if(isEmpty) { + try { + if (!fs.delete(nsStatus.getPath(), true)) { + LOG.error("Failed to remove namespace directory: " + nsStatus.getPath()); + } + } catch (IOException ex) { + LOG.error("Failed to remove namespace directory: " + nsStatus.getPath(), + ex); + } + LOG.debug("Removed namespace directory: "+nsStatus.getPath()); + } else { + LOG.debug("Skipping non-empty namespace directory: " + nsStatus.getPath()); + } + } + } + + String baseZnode = ZooKeeperWatcher.namespaceZNode; + for(String child : ZKUtil.listChildrenNoWatch(services.getZooKeeper(), baseZnode)) { + if (!descs.contains(child) && + !NamespaceDescriptor.RESERVED_NAMESPACES.contains(child)) { + String znode = ZKUtil.joinZNode(baseZnode, child); + try { + ZKUtil.deleteNode(services.getZooKeeper(), znode); + LOG.debug("Removed namespace znode: " + znode); + } catch (KeeperException ex) { + LOG.debug("Failed to remove namespace znode: " + znode, ex); + } + } + } + + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index 47d07dd..972f373 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -29,6 +29,7 @@ import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.RegionTransition; import org.apache.hadoop.hbase.Server; @@ -417,13 +418,13 @@ public class RegionStates { * @param tableName * @return Online regions from tableName */ - public synchronized List getRegionsOfTable(byte[] tableName) { + public synchronized List getRegionsOfTable(TableName tableName) { List tableRegions = new ArrayList(); // boundary needs to have table's name but regionID 0 so that it is sorted // before all table's regions. HRegionInfo boundary = new HRegionInfo(tableName, null, null, false, 0L); for (HRegionInfo hri: regionAssignments.tailMap(boundary).keySet()) { - if(!Bytes.equals(hri.getTableName(), tableName)) break; + if(!hri.getTableName().equals(tableName)) break; tableRegions.add(hri); } return tableRegions; @@ -503,9 +504,10 @@ public class RegionStates { * * @return A clone of current assignments by table. */ - protected Map>> getAssignmentsByTable() { - Map>> result = - new HashMap>>(); + protected Map>> + getAssignmentsByTable() { + Map>> result = + new HashMap>>(); synchronized (this) { if (!server.getConfiguration().getBoolean("hbase.master.loadbalance.bytable", false)) { Map> svrToRegions = @@ -513,12 +515,12 @@ public class RegionStates { for (Map.Entry> e: serverHoldings.entrySet()) { svrToRegions.put(e.getKey(), new ArrayList(e.getValue())); } - result.put("ensemble", svrToRegions); + result.put(TableName.valueOf("ensemble"), svrToRegions); } else { for (Map.Entry> e: serverHoldings.entrySet()) { for (HRegionInfo hri: e.getValue()) { if (hri.isMetaRegion()) continue; - String tablename = hri.getTableNameAsString(); + TableName tablename = hri.getTableName(); Map> svrToRegions = result.get(tablename); if (svrToRegions == null) { svrToRegions = new HashMap>(serverHoldings.size()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java index a616b92..dcaef2b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java @@ -27,6 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.InterProcessLock; import org.apache.hadoop.hbase.InterProcessLock.MetadataHandler; import org.apache.hadoop.hbase.InterProcessReadWriteLock; @@ -41,7 +42,6 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.zookeeper.lock.ZKInterProcessReadWriteLock; import org.apache.zookeeper.KeeperException; -import com.google.protobuf.ByteString; import com.google.protobuf.InvalidProtocolBufferException; /** @@ -104,7 +104,7 @@ public abstract class TableLockManager { * @param purpose Human readable reason for locking the table * @return A new TableLock object for acquiring a write lock */ - public abstract TableLock writeLock(byte[] tableName, 
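Because TableName is a value type, region-to-table matching switches from Bytes.equals on raw byte[] names to plain equals(), as in getRegionsOfTable above. For example:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public final class TableNameEquality {
  public static void main(String[] args) {
    TableName a = TableName.valueOf("ensemble");
    TableName b = TableName.valueOf("ensemble");

    // Value equality replaces the Bytes.equals(byte[], byte[]) comparisons above.
    System.out.println(a.equals(b));                              // true
    System.out.println(Bytes.equals(a.getName(), b.getName()));   // equivalent byte-level check
  }
}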
String purpose); + public abstract TableLock writeLock(TableName tableName, String purpose); /** * Returns a TableLock for locking the table for shared access among read-lock holders @@ -112,7 +112,7 @@ public abstract class TableLockManager { * @param purpose Human readable reason for locking the table * @return A new TableLock object for acquiring a read lock */ - public abstract TableLock readLock(byte[] tableName, String purpose); + public abstract TableLock readLock(TableName tableName, String purpose); /** * Visits all table locks(read and write), and lock attempts with the given callback @@ -148,7 +148,7 @@ public abstract class TableLockManager { * @param tableName name of the table * @throws IOException If there is an unrecoverable error releasing the lock */ - public abstract void tableDeleted(byte[] tableName) + public abstract void tableDeleted(TableName tableName) throws IOException; /** @@ -186,11 +186,11 @@ public abstract class TableLockManager { } } @Override - public TableLock writeLock(byte[] tableName, String purpose) { + public TableLock writeLock(TableName tableName, String purpose) { return new NullTableLock(); } @Override - public TableLock readLock(byte[] tableName, String purpose) { + public TableLock readLock(TableName tableName, String purpose) { return new NullTableLock(); } @Override @@ -200,7 +200,7 @@ public abstract class TableLockManager { public void reapWriteLocks() throws IOException { } @Override - public void tableDeleted(byte[] tableName) throws IOException { + public void tableDeleted(TableName tableName) throws IOException { } @Override public void visitAllLocks(MetadataHandler handler) throws IOException { @@ -249,18 +249,16 @@ public abstract class TableLockManager { private static class TableLockImpl implements TableLock { long lockTimeoutMs; - byte[] tableName; - String tableNameStr; + TableName tableName; InterProcessLock lock; boolean isShared; ZooKeeperWatcher zkWatcher; ServerName serverName; String purpose; - public TableLockImpl(byte[] tableName, ZooKeeperWatcher zkWatcher, + public TableLockImpl(TableName tableName, ZooKeeperWatcher zkWatcher, ServerName serverName, long lockTimeoutMs, boolean isShared, String purpose) { this.tableName = tableName; - tableNameStr = Bytes.toString(tableName); this.zkWatcher = zkWatcher; this.serverName = serverName; this.lockTimeoutMs = lockTimeoutMs; @@ -272,7 +270,7 @@ public abstract class TableLockManager { public void acquire() throws IOException { if (LOG.isTraceEnabled()) { LOG.trace("Attempt to acquire table " + (isShared ? "read" : "write") + - " lock on: " + tableNameStr + " for:" + purpose); + " lock on: " + tableName + " for:" + purpose); } lock = createTableLock(); @@ -283,47 +281,48 @@ public abstract class TableLockManager { } else { if (!lock.tryAcquire(lockTimeoutMs)) { throw new LockTimeoutException("Timed out acquiring " + - (isShared ? "read" : "write") + "lock for table:" + tableNameStr + + (isShared ? "read" : "write") + "lock for table:" + tableName + "for:" + purpose + " after " + lockTimeoutMs + " ms."); } } } catch (InterruptedException e) { - LOG.warn("Interrupted acquiring a lock for " + tableNameStr, e); + LOG.warn("Interrupted acquiring a lock for " + tableName, e); Thread.currentThread().interrupt(); throw new InterruptedIOException("Interrupted acquiring a lock"); } if (LOG.isTraceEnabled()) LOG.trace("Acquired table " + (isShared ? 
"read" : "write") - + " lock on " + tableNameStr + " for " + purpose); + + " lock on " + tableName + " for " + purpose); } @Override public void release() throws IOException { if (LOG.isTraceEnabled()) { LOG.trace("Attempt to release table " + (isShared ? "read" : "write") - + " lock on " + tableNameStr); + + " lock on " + tableName); } if (lock == null) { - throw new IllegalStateException("Table " + tableNameStr + + throw new IllegalStateException("Table " + tableName + " is not locked!"); } try { lock.release(); } catch (InterruptedException e) { - LOG.warn("Interrupted while releasing a lock for " + tableNameStr); + LOG.warn("Interrupted while releasing a lock for " + tableName); Thread.currentThread().interrupt(); throw new InterruptedIOException(); } if (LOG.isTraceEnabled()) { - LOG.trace("Released table lock on " + tableNameStr); + LOG.trace("Released table lock on " + tableName); } } private InterProcessLock createTableLock() { - String tableLockZNode = ZKUtil.joinZNode(zkWatcher.tableLockZNode, tableNameStr); + String tableLockZNode = ZKUtil.joinZNode(zkWatcher.tableLockZNode, + tableName.getNameAsString()); ZooKeeperProtos.TableLock data = ZooKeeperProtos.TableLock.newBuilder() - .setTableName(ByteString.copyFrom(tableName)) + .setTableName(ProtobufUtil.toProtoTableName(tableName)) .setLockOwner(ProtobufUtil.toServerName(serverName)) .setThreadId(Thread.currentThread().getId()) .setPurpose(purpose) @@ -367,12 +366,12 @@ public abstract class TableLockManager { } @Override - public TableLock writeLock(byte[] tableName, String purpose) { + public TableLock writeLock(TableName tableName, String purpose) { return new TableLockImpl(tableName, zkWatcher, serverName, writeLockTimeoutMs, false, purpose); } - public TableLock readLock(byte[] tableName, String purpose) { + public TableLock readLock(TableName tableName, String purpose) { return new TableLockImpl(tableName, zkWatcher, serverName, readLockTimeoutMs, true, purpose); } @@ -435,9 +434,9 @@ public abstract class TableLockManager { } @Override - public void tableDeleted(byte[] tableName) throws IOException { + public void tableDeleted(TableName tableName) throws IOException { //table write lock from DeleteHandler is already released, just delete the parent znode - String tableNameStr = Bytes.toString(tableName); + String tableNameStr = tableName.getNameAsString(); String tableLockZNode = ZKUtil.joinZNode(zkWatcher.tableLockZNode, tableNameStr); try { ZKUtil.deleteNode(zkWatcher, tableLockZNode); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java new file mode 100644 index 0000000..7333923 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -0,0 +1,224 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master; + +import java.io.IOException; +import java.util.NavigableSet; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.ZKNamespaceManager; +import org.apache.hadoop.hbase.catalog.MetaReader; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.master.handler.CreateTableHandler; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.util.Bytes; + +import com.google.common.collect.Sets; +import org.apache.hadoop.hbase.util.FSUtils; + +/** + * This is a helper class used to manage the namespace + * metadata that is stored in {@see HConstants.NAMESPACE_TABLE_NAME} + * It also mirrors updates to the ZK store by forwarding updates to + * {@link org.apache.hadoop.hbase.ZKNamespaceManager} + */ +@InterfaceAudience.Private +public class TableNamespaceManager { + private static final Log LOG = LogFactory.getLog(TableNamespaceManager.class); + + private Configuration conf; + private MasterServices masterServices; + private HTable table; + private ZKNamespaceManager zkNamespaceManager; + + public TableNamespaceManager(MasterServices masterServices) throws IOException { + this.masterServices = masterServices; + this.conf = masterServices.getConfiguration(); + } + + public void start() throws IOException { + TableName tableName = TableName.NAMESPACE_TABLE_NAME; + try { + if (!MetaReader.tableExists(masterServices.getCatalogTracker(), + tableName)) { + LOG.info("Namespace table not found. 
Creating..."); + createNamespaceTable(masterServices); + } + } catch (InterruptedException e) { + throw new IOException("Wait for namespace table assignment interrupted", e); + } + table = new HTable(conf, tableName); + zkNamespaceManager = new ZKNamespaceManager(masterServices.getZooKeeper()); + zkNamespaceManager.start(); + + if (get(NamespaceDescriptor.DEFAULT_NAMESPACE.getName()) == null) { + create(NamespaceDescriptor.DEFAULT_NAMESPACE); + } + if (get(NamespaceDescriptor.SYSTEM_NAMESPACE.getName()) == null) { + create(NamespaceDescriptor.SYSTEM_NAMESPACE); + } + + ResultScanner scanner = table.getScanner(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES); + try { + for(Result result : scanner) { + NamespaceDescriptor ns = + ProtobufUtil.toNamespaceDescriptor( + HBaseProtos.NamespaceDescriptor.parseFrom( + result.getColumnLatest(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES, + HTableDescriptor.NAMESPACE_COL_DESC_BYTES).getValue())); + zkNamespaceManager.update(ns); + } + } finally { + scanner.close(); + } + } + + + public synchronized NamespaceDescriptor get(String name) throws IOException { + Result res = table.get(new Get(Bytes.toBytes(name))); + if (res.isEmpty()) { + return null; + } + return + ProtobufUtil.toNamespaceDescriptor( + HBaseProtos.NamespaceDescriptor.parseFrom( + res.getColumnLatest(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES, + HTableDescriptor.NAMESPACE_COL_DESC_BYTES).getValue())); + } + + public synchronized void create(NamespaceDescriptor ns) throws IOException { + if (get(ns.getName()) != null) { + throw new ConstraintException("Namespace "+ns.getName()+" already exists"); + } + FileSystem fs = masterServices.getMasterFileSystem().getFileSystem(); + fs.mkdirs(FSUtils.getNamespaceDir( + masterServices.getMasterFileSystem().getRootDir(), ns.getName())); + upsert(ns); + } + + public synchronized void update(NamespaceDescriptor ns) throws IOException { + if (get(ns.getName()) == null) { + throw new ConstraintException("Namespace "+ns.getName()+" does not exist"); + } + upsert(ns); + } + + private void upsert(NamespaceDescriptor ns) throws IOException { + Put p = new Put(Bytes.toBytes(ns.getName())); + p.add(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES, + HTableDescriptor.NAMESPACE_COL_DESC_BYTES, + ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray()); + table.put(p); + try { + zkNamespaceManager.update(ns); + } catch(IOException ex) { + String msg = "Failed to update namespace information in ZK. Aborting."; + LOG.fatal(msg, ex); + masterServices.abort(msg, ex); + } + } + + public synchronized void remove(String name) throws IOException { + if (NamespaceDescriptor.RESERVED_NAMESPACES.contains(name)) { + throw new ConstraintException("Reserved namespace "+name+" cannot be removed."); + } + int tableCount = masterServices.getTableDescriptorsByNamespace(name).size(); + if (tableCount > 0) { + throw new ConstraintException("Only empty namespaces can be removed. 
" + + "Namespace "+name+" has "+tableCount+" tables"); + } + Delete d = new Delete(Bytes.toBytes(name)); + table.delete(d); + //don't abort if cleanup isn't complete + //it will be replaced on new namespace creation + zkNamespaceManager.remove(name); + FileSystem fs = masterServices.getMasterFileSystem().getFileSystem(); + for(FileStatus status : + fs.listStatus(FSUtils.getNamespaceDir( + masterServices.getMasterFileSystem().getRootDir(), name))) { + if (!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) { + throw new IOException("Namespace directory contains table dir: "+status.getPath()); + } + } + if (!fs.delete(FSUtils.getNamespaceDir( + masterServices.getMasterFileSystem().getRootDir(), name), true)) { + throw new IOException("Failed to remove namespace: "+name); + } + } + + public synchronized NavigableSet list() throws IOException { + NavigableSet ret = + Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR); + ResultScanner scanner = table.getScanner(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES); + try { + for(Result r : scanner) { + ret.add(ProtobufUtil.toNamespaceDescriptor( + HBaseProtos.NamespaceDescriptor.parseFrom( + r.getColumnLatest(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES, + HTableDescriptor.NAMESPACE_COL_DESC_BYTES).getValue()))); + } + } finally { + scanner.close(); + } + return ret; + } + + private void createNamespaceTable(MasterServices masterServices) throws IOException, InterruptedException { + HRegionInfo newRegions[] = new HRegionInfo[]{ + new HRegionInfo(HTableDescriptor.NAMESPACE_TABLEDESC.getTableName(), null, null)}; + + //we need to create the table this way to bypass + //checkInitialized + masterServices.getExecutorService() + .submit(new CreateTableHandler(masterServices, + masterServices.getMasterFileSystem(), + HTableDescriptor.NAMESPACE_TABLEDESC, + masterServices.getConfiguration(), + newRegions, + masterServices).prepare()); + //wait for region to be online + int tries = conf.getInt("hbase.master.namespace.init.timeout", 600); + while(masterServices.getAssignmentManager() + .getRegionStates().getRegionServerOfRegion(newRegions[0]) == null && + tries > 0) { + Thread.sleep(100); + tries--; + } + if (tries <= 0) { + throw new IOException("Failed to create namespace table."); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java index 0326c20..74d6d80 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.LoadBalancer; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.util.Bytes; import com.google.common.base.Joiner; import com.google.common.collect.ArrayListMultimap; @@ -145,7 +144,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer { regionPerServerIndex = 0; for (HRegionInfo region : entry.getValue()) { - String tableName = region.getTableNameAsString(); + String tableName = region.getTableName().getNameAsString(); Integer idx = tablesToIndex.get(tableName); if (idx == null) { tables.add(tableName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java index a452402..370f8c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java @@ -34,6 +34,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; @@ -101,7 +102,7 @@ public class FavoredNodeAssignmentHelper { * @throws IOException */ public static Map fullScan( - CatalogTracker catalogTracker, final Set disabledTables, + CatalogTracker catalogTracker, final Set disabledTables, final boolean excludeOfflinedSplitParents, FavoredNodeLoadBalancer balancer) throws IOException { final Map regions = @@ -115,9 +116,9 @@ public class FavoredNodeAssignmentHelper { Pair region = HRegionInfo.getHRegionInfoAndServerName(r); HRegionInfo hri = region.getFirst(); if (hri == null) return true; - if (hri.getTableNameAsString() == null) return true; + if (hri.getTableName() == null) return true; if (disabledTables.contains( - hri.getTableNameAsString())) return true; + hri.getTableName())) return true; // Are we to include split parents in the list? if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; regions.put(hri, region.getSecond()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java index 02cca53..af950a0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.java @@ -31,13 +31,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.util.Bytes; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; @@ -144,15 +144,15 @@ class RegionLocationFinder { * @return HTableDescriptor * @throws IOException */ - protected HTableDescriptor getTableDescriptor(byte[] tableName) throws IOException { + protected HTableDescriptor getTableDescriptor(TableName tableName) throws IOException { HTableDescriptor tableDescriptor = null; try { if (this.services != null) { - tableDescriptor = this.services.getTableDescriptors().get(Bytes.toString(tableName)); + tableDescriptor = this.services.getTableDescriptors().get(tableName); } } catch (FileNotFoundException fnfe) { LOG.debug("FileNotFoundException during getTableDescriptors." 
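TableDescriptors lookups, as in getTableDescriptor above, are now keyed by TableName rather than by a String name. A minimal sketch (the helper itself is made up):

import java.io.IOException;

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;

public final class DescriptorLookup {
  // Returns null when no descriptor is on file for the table, mirroring the
  // null checks in the master code above.
  static HTableDescriptor lookup(TableDescriptors tds, TableName tableName)
      throws IOException {
    return tds.get(tableName);
  }
}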
+ " Current table name = " - + Bytes.toStringBinary(tableName), fnfe); + + tableName, fnfe); } return tableDescriptor; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java index 9d926e1..5846d3e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java @@ -92,7 +92,7 @@ public class ClosedRegionHandler extends EventHandler implements TotesHRegionInf LOG.debug("Handling CLOSED event for " + regionInfo.getEncodedName()); // Check if this table is being disabled or not if (this.assignmentManager.getZKTable(). - isDisablingOrDisabledTable(this.regionInfo.getTableNameAsString())) { + isDisablingOrDisabledTable(this.regionInfo.getTableName())) { assignmentManager.offlineDisabledRegion(regionInfo); return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java index 677b4d4..b9246ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java @@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; @@ -46,6 +47,7 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.ModifyRegionUtils; import org.apache.zookeeper.KeeperException; @@ -77,7 +79,7 @@ public class CreateTableHandler extends EventHandler { this.assignmentManager = masterServices.getAssignmentManager(); this.tableLockManager = masterServices.getTableLockManager(); - this.tableLock = this.tableLockManager.writeLock(this.hTableDescriptor.getName() + this.tableLock = this.tableLockManager.writeLock(this.hTableDescriptor.getTableName() , EventType.C_M_CREATE_TABLE.toString()); } @@ -100,7 +102,7 @@ public class CreateTableHandler extends EventHandler { this.tableLock.acquire(); boolean success = false; try { - String tableName = this.hTableDescriptor.getNameAsString(); + TableName tableName = this.hTableDescriptor.getTableName(); if (MetaReader.tableExists(catalogTracker, tableName)) { throw new TableExistsException(tableName); } @@ -137,12 +139,12 @@ public class CreateTableHandler extends EventHandler { name = server.getServerName().toString(); } return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + - this.hTableDescriptor.getNameAsString(); + this.hTableDescriptor.getTableName(); } @Override public void process() { - String tableName = this.hTableDescriptor.getNameAsString(); + TableName tableName = this.hTableDescriptor.getTableName(); LOG.info("Create table " + tableName); try { @@ -174,11 +176,11 @@ public class CreateTableHandler extends EventHandler { // It will 
block the creation saying TableAlreadyExists. try { this.assignmentManager.getZKTable().removeEnablingTable( - this.hTableDescriptor.getNameAsString(), false); + this.hTableDescriptor.getTableName(), false); } catch (KeeperException e) { // Keeper exception should not happen here LOG.error("Got a keeper exception while removing the ENABLING table znode " - + this.hTableDescriptor.getNameAsString(), e); + + this.hTableDescriptor.getTableName(), e); } } } @@ -197,19 +199,19 @@ public class CreateTableHandler extends EventHandler { * [If something fails here: we still have the table in disabled state] * - Update ZooKeeper with the enabled state */ - private void handleCreateTable(String tableName) throws IOException, KeeperException { + private void handleCreateTable(TableName tableName) + throws IOException, KeeperException { Path tempdir = fileSystemManager.getTempDir(); FileSystem fs = fileSystemManager.getFileSystem(); // 1. Create Table Descriptor - Path tempTableDir = new Path(tempdir, tableName); + Path tempTableDir = FSUtils.getTableDir(tempdir, tableName); new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory( tempTableDir, this.hTableDescriptor, false); - Path tableDir = new Path(fileSystemManager.getRootDir(), tableName); + Path tableDir = FSUtils.getTableDir(fileSystemManager.getRootDir(), tableName); // 2. Create Regions List regionInfos = handleCreateHdfsRegions(tempdir, tableName); - // 3. Move Table temp directory to the hbase root location if (!fs.rename(tempTableDir, tableDir)) { throw new IOException("Unable to move table from temp=" + tempTableDir + @@ -258,7 +260,7 @@ public class CreateTableHandler extends EventHandler { * @return the list of regions created */ protected List handleCreateHdfsRegions(final Path tableRootDir, - final String tableName) + final TableName tableName) throws IOException { return ModifyRegionUtils.createRegions(conf, tableRootDir, hTableDescriptor, newRegions, null); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java index 8bebfcb..a884a3d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java @@ -26,6 +26,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.backup.HFileArchiver; @@ -46,7 +47,7 @@ import org.apache.zookeeper.KeeperException; public class DeleteTableHandler extends TableEventHandler { private static final Log LOG = LogFactory.getLog(DeleteTableHandler.class); - public DeleteTableHandler(byte [] tableName, Server server, + public DeleteTableHandler(TableName tableName, Server server, final MasterServices masterServices) { super(EventType.C_M_DELETE_TABLE, tableName, server, masterServices); } @@ -111,16 +112,15 @@ public class DeleteTableHandler extends TableEventHandler { LOG.error("Couldn't delete " + tempTableDir); } - LOG.debug("Table '" + Bytes.toString(tableName) + "' archived!"); + LOG.debug("Table '" + tableName + "' archived!"); } finally { - String tableNameStr = Bytes.toString(tableName); // 6. 
Update table descriptor cache - LOG.debug("Removing '" + tableNameStr + "' descriptor."); - this.masterServices.getTableDescriptors().remove(Bytes.toString(tableName)); + LOG.debug("Removing '" + tableName + "' descriptor."); + this.masterServices.getTableDescriptors().remove(tableName); // 7. If entry for this table in zk, and up in AssignmentManager, remove it. - LOG.debug("Marking '" + tableNameStr + "' as deleted."); - am.getZKTable().setDeletedTable(tableNameStr); + LOG.debug("Marking '" + tableName + "' as deleted."); + am.getZKTable().setDeletedTable(tableName); } if (cpHost != null) { @@ -144,6 +144,6 @@ public class DeleteTableHandler extends TableEventHandler { if(server != null && server.getServerName() != null) { name = server.getServerName().toString(); } - return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + tableNameStr; + return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + tableName; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java index 7812e1a..f07669f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java @@ -25,12 +25,14 @@ import java.util.concurrent.ExecutorService; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.catalog.MetaReader; +import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.AssignmentManager; @@ -41,7 +43,6 @@ import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.zookeeper.KeeperException; import org.cloudera.htrace.Trace; @@ -51,20 +52,18 @@ import org.cloudera.htrace.Trace; @InterfaceAudience.Private public class DisableTableHandler extends EventHandler { private static final Log LOG = LogFactory.getLog(DisableTableHandler.class); - private final byte [] tableName; - private final String tableNameStr; + private final TableName tableName; private final AssignmentManager assignmentManager; private final TableLockManager tableLockManager; private final CatalogTracker catalogTracker; private final boolean skipTableStateCheck; private TableLock tableLock; - public DisableTableHandler(Server server, byte [] tableName, + public DisableTableHandler(Server server, TableName tableName, CatalogTracker catalogTracker, AssignmentManager assignmentManager, TableLockManager tableLockManager, boolean skipTableStateCheck) { super(server, EventType.C_M_DISABLE_TABLE); this.tableName = tableName; - this.tableNameStr = Bytes.toString(this.tableName); this.assignmentManager = assignmentManager; this.catalogTracker = catalogTracker; 
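The handler hunks in this part of the patch all follow one pattern: the paired byte[] tableName / String tableNameStr fields collapse into a single TableName, which carries the namespace and prints readably in log messages. As a rough, standalone illustration of the accessors those hunks rely on (not part of the patch; the namespace and table literals below are invented), a TableName can be built and inspected like this:

import org.apache.hadoop.hbase.TableName;

// Sketch of TableName usage, assuming the accessors named in the surrounding
// hunks (valueOf, getNameAsString, getNamespaceAsString, getName).
// The names "ns" and "orders" are hypothetical.
public class TableNameSketch {
  public static void main(String[] args) {
    TableName qualified = TableName.valueOf("ns", "orders"); // explicit namespace
    TableName plain = TableName.valueOf("orders");           // unqualified, resolves to the default namespace

    // The string form is what replaces the old tableNameStr field in log output.
    System.out.println(qualified.getNameAsString());
    System.out.println(qualified.getNamespaceAsString());
    System.out.println(plain.getNamespaceAsString());

    // Raw bytes remain available where byte[] APIs still exist.
    byte[] raw = qualified.getName();
    System.out.println(raw.length);
  }
}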
this.tableLockManager = tableLockManager; @@ -73,6 +72,9 @@ public class DisableTableHandler extends EventHandler { public DisableTableHandler prepare() throws TableNotFoundException, TableNotEnabledException, IOException { + if(tableName.equals(TableName.META_TABLE_NAME)) { + throw new ConstraintException("Cannot disable catalog table"); + } //acquire the table write lock, blocking this.tableLock = this.tableLockManager.writeLock(tableName, EventType.C_M_DISABLE_TABLE.toString()); @@ -81,8 +83,8 @@ public class DisableTableHandler extends EventHandler { boolean success = false; try { // Check if table exists - if (!MetaReader.tableExists(catalogTracker, this.tableNameStr)) { - throw new TableNotFoundException(this.tableNameStr); + if (!MetaReader.tableExists(catalogTracker, tableName)) { + throw new TableNotFoundException(tableName); } // There could be multiple client requests trying to disable or enable @@ -93,9 +95,9 @@ public class DisableTableHandler extends EventHandler { if (!skipTableStateCheck) { try { if (!this.assignmentManager.getZKTable().checkEnabledAndSetDisablingTable - (this.tableNameStr)) { - LOG.info("Table " + tableNameStr + " isn't enabled; skipping disable"); - throw new TableNotEnabledException(this.tableNameStr); + (this.tableName)) { + LOG.info("Table " + tableName + " isn't enabled; skipping disable"); + throw new TableNotEnabledException(this.tableName); } } catch (KeeperException e) { throw new IOException("Unable to ensure that the table will be" + @@ -119,13 +121,13 @@ public class DisableTableHandler extends EventHandler { name = server.getServerName().toString(); } return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + - tableNameStr; + tableName; } @Override public void process() { try { - LOG.info("Attempting to disable table " + this.tableNameStr); + LOG.info("Attempting to disable table " + this.tableName); MasterCoprocessorHost cpHost = ((HMaster) this.server) .getCoprocessorHost(); if (cpHost != null) { @@ -136,9 +138,9 @@ public class DisableTableHandler extends EventHandler { cpHost.postDisableTableHandler(this.tableName); } } catch (IOException e) { - LOG.error("Error trying to disable table " + this.tableNameStr, e); + LOG.error("Error trying to disable table " + this.tableName, e); } catch (KeeperException e) { - LOG.error("Error trying to disable table " + this.tableNameStr, e); + LOG.error("Error trying to disable table " + this.tableName, e); } finally { releaseTableLock(); } @@ -156,7 +158,7 @@ public class DisableTableHandler extends EventHandler { private void handleDisableTable() throws IOException, KeeperException { // Set table disabling flag up in zk. - this.assignmentManager.getZKTable().setDisablingTable(this.tableNameStr); + this.assignmentManager.getZKTable().setDisablingTable(this.tableName); boolean done = false; while (true) { // Get list of online regions that are of this table. Regions that are @@ -184,7 +186,7 @@ public class DisableTableHandler extends EventHandler { } } // Flip the table to disabled if success. 
- if (done) this.assignmentManager.getZKTable().setDisabledTable(this.tableNameStr); + if (done) this.assignmentManager.getZKTable().setDisabledTable(this.tableName); LOG.info("Disabled table is done=" + done); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java index 8c15d05..8db3161 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java @@ -26,6 +26,7 @@ import java.util.concurrent.ExecutorService; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; @@ -44,7 +45,6 @@ import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.zookeeper.KeeperException; import org.cloudera.htrace.Trace; @@ -55,20 +55,18 @@ import org.cloudera.htrace.Trace; @InterfaceAudience.Private public class EnableTableHandler extends EventHandler { private static final Log LOG = LogFactory.getLog(EnableTableHandler.class); - private final byte [] tableName; - private final String tableNameStr; + private final TableName tableName; private final AssignmentManager assignmentManager; private final TableLockManager tableLockManager; private final CatalogTracker catalogTracker; private boolean retainAssignment = false; private TableLock tableLock; - public EnableTableHandler(Server server, byte [] tableName, + public EnableTableHandler(Server server, TableName tableName, CatalogTracker catalogTracker, AssignmentManager assignmentManager, TableLockManager tableLockManager, boolean skipTableStateCheck) { super(server, EventType.C_M_ENABLE_TABLE); this.tableName = tableName; - this.tableNameStr = Bytes.toString(tableName); this.catalogTracker = catalogTracker; this.assignmentManager = assignmentManager; this.tableLockManager = tableLockManager; @@ -85,16 +83,16 @@ public class EnableTableHandler extends EventHandler { boolean success = false; try { // Check if table exists - if (!MetaReader.tableExists(catalogTracker, this.tableNameStr)) { + if (!MetaReader.tableExists(catalogTracker, tableName)) { // retainAssignment is true only during recovery. In normal case it is false if (!this.retainAssignment) { - throw new TableNotFoundException(tableNameStr); + throw new TableNotFoundException(tableName); } try { - this.assignmentManager.getZKTable().removeEnablingTable(tableNameStr, true); + this.assignmentManager.getZKTable().removeEnablingTable(tableName, true); } catch (KeeperException e) { // TODO : Use HBCK to clear such nodes - LOG.warn("Failed to delete the ENABLING node for the table " + tableNameStr + LOG.warn("Failed to delete the ENABLING node for the table " + tableName + ". The table will remain unusable. 
Run HBCK to manually fix the problem."); } } @@ -106,9 +104,9 @@ public class EnableTableHandler extends EventHandler { if (!retainAssignment) { try { if (!this.assignmentManager.getZKTable().checkDisabledAndSetEnablingTable - (this.tableNameStr)) { - LOG.info("Table " + tableNameStr + " isn't disabled; skipping enable"); - throw new TableNotDisabledException(this.tableNameStr); + (this.tableName)) { + LOG.info("Table " + tableName + " isn't disabled; skipping enable"); + throw new TableNotDisabledException(this.tableName); } } catch (KeeperException e) { throw new IOException("Unable to ensure that the table will be" + @@ -131,13 +129,13 @@ public class EnableTableHandler extends EventHandler { name = server.getServerName().toString(); } return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + - tableNameStr; + tableName; } @Override public void process() { try { - LOG.info("Attempting to enable the table " + this.tableNameStr); + LOG.info("Attempting to enable the table " + this.tableName); MasterCoprocessorHost cpHost = ((HMaster) this.server) .getCoprocessorHost(); if (cpHost != null) { @@ -148,11 +146,11 @@ public class EnableTableHandler extends EventHandler { cpHost.postEnableTableHandler(this.tableName); } } catch (IOException e) { - LOG.error("Error trying to enable the table " + this.tableNameStr, e); + LOG.error("Error trying to enable the table " + this.tableName, e); } catch (KeeperException e) { - LOG.error("Error trying to enable the table " + this.tableNameStr, e); + LOG.error("Error trying to enable the table " + this.tableName, e); } catch (InterruptedException e) { - LOG.error("Error trying to enable the table " + this.tableNameStr, e); + LOG.error("Error trying to enable the table " + this.tableName, e); } finally { releaseTableLock(); } @@ -173,7 +171,7 @@ public class EnableTableHandler extends EventHandler { // that user first finish disabling but that might be obnoxious. // Set table enabling flag up in zk. - this.assignmentManager.getZKTable().setEnablingTable(this.tableNameStr); + this.assignmentManager.getZKTable().setEnablingTable(this.tableName); boolean done = false; // Get the regions of this table. We're done when all listed // tables are onlined. @@ -185,7 +183,7 @@ public class EnableTableHandler extends EventHandler { if (regionsCount == 0) { done = true; } - LOG.info("Table '" + this.tableNameStr + "' has " + countOfRegionsInTable + LOG.info("Table '" + this.tableName + "' has " + countOfRegionsInTable + " regions, of which " + regionsCount + " are offline."); BulkEnabler bd = new BulkEnabler(this.server, regions, countOfRegionsInTable, this.retainAssignment); @@ -195,18 +193,18 @@ public class EnableTableHandler extends EventHandler { } } catch (InterruptedException e) { LOG.warn("Enable operation was interrupted when enabling table '" - + this.tableNameStr + "'"); + + this.tableName + "'"); // Preserve the interrupt. Thread.currentThread().interrupt(); } if (done) { // Flip the table to enabled. this.assignmentManager.getZKTable().setEnabledTable( - this.tableNameStr); - LOG.info("Table '" + this.tableNameStr + this.tableName); + LOG.info("Table '" + this.tableName + "' was successfully enabled. Status: done=" + done); } else { - LOG.warn("Table '" + this.tableNameStr + LOG.warn("Table '" + this.tableName + "' wasn't successfully enabled. 
Status: done=" + done); } } @@ -232,7 +230,7 @@ public class EnableTableHandler extends EventHandler { } else { if (LOG.isDebugEnabled()) { LOG.debug("Skipping assign for the region " + hri + " during enable table " - + hri.getTableNameAsString() + " because its already in tranition or assigned."); + + hri.getTableName() + " because its already in tranition or assigned."); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java index 7611002..622a6ca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java @@ -25,6 +25,7 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; @@ -41,7 +42,7 @@ public class ModifyTableHandler extends TableEventHandler { private final HTableDescriptor htd; - public ModifyTableHandler(final byte [] tableName, + public ModifyTableHandler(final TableName tableName, final HTableDescriptor htd, final Server server, final MasterServices masterServices) { super(EventType.C_M_MODIFY_TABLE, tableName, server, masterServices); @@ -82,7 +83,7 @@ public class ModifyTableHandler extends TableEventHandler { for (byte[] familyName: oldFamilies) { if (!newFamilies.contains(familyName)) { LOG.debug("Removing family=" + Bytes.toString(familyName) + - " from table=" + Bytes.toString(this.tableName)); + " from table=" + this.tableName); for (HRegionInfo hri: hris) { // Delete the family directory in FS for all the regions one by one mfs.deleteFamilyFromFS(hri, familyName); @@ -101,6 +102,6 @@ public class ModifyTableHandler extends TableEventHandler { name = server.getServerName().toString(); } return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + - tableNameStr; + tableName; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java index 607f66e..bd5c029 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java @@ -23,6 +23,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.executor.EventHandler; @@ -46,7 +47,8 @@ public class OpenedRegionHandler extends EventHandler implements TotesHRegionInf private enum OpenedPriority { META (1), - USER (2); + SYSTEM (2), + USER (3); private final int value; OpenedPriority(int value) { @@ -67,6 +69,9 @@ public class OpenedRegionHandler extends EventHandler implements TotesHRegionInf this.expectedVersion = expectedVersion; if(regionInfo.isMetaRegion()) { priority = OpenedPriority.META; + } else if(regionInfo.getTableName() + 
.getNamespaceAsString().equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { + priority = OpenedPriority.SYSTEM; } else { priority = OpenedPriority.USER; } @@ -109,7 +114,7 @@ public class OpenedRegionHandler extends EventHandler implements TotesHRegionInf } if (!openedNodeDeleted) { if (this.assignmentManager.getZKTable().isDisablingOrDisabledTable( - regionInfo.getTableNameAsString())) { + regionInfo.getTableName())) { debugLog(regionInfo, "Opened region " + regionInfo.getShortNameToLog() + " but " + "this table is disabled, triggering close of region"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java index 8695f9f..5187c01 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java @@ -244,7 +244,7 @@ public class ServerShutdownHandler extends EventHandler { toAssignRegions.add(hri); } else if (rit != null) { if ((rit.isClosing() || rit.isPendingClose()) - && am.getZKTable().isDisablingOrDisabledTable(hri.getTableNameAsString())) { + && am.getZKTable().isDisablingOrDisabledTable(hri.getTableName())) { // If the table was partially disabled and the RS went down, we should clear the RIT // and remove the node for the region. // The rit that we use may be stale in case the table was in DISABLING state @@ -330,17 +330,17 @@ public class ServerShutdownHandler extends EventHandler { AssignmentManager assignmentManager, CatalogTracker catalogTracker) throws IOException { boolean tablePresent = assignmentManager.getZKTable().isTablePresent( - hri.getTableNameAsString()); + hri.getTableName()); if (!tablePresent) { - LOG.info("The table " + hri.getTableNameAsString() + LOG.info("The table " + hri.getTableName() + " was deleted. Hence not proceeding."); return false; } // If table is not disabled but the region is offlined, boolean disabled = assignmentManager.getZKTable().isDisabledTable( - hri.getTableNameAsString()); + hri.getTableName()); if (disabled){ - LOG.info("The table " + hri.getTableNameAsString() + LOG.info("The table " + hri.getTableName() + " was disabled. Hence not proceeding."); return false; } @@ -351,9 +351,9 @@ public class ServerShutdownHandler extends EventHandler { return false; } boolean disabling = assignmentManager.getZKTable().isDisablingTable( - hri.getTableNameAsString()); + hri.getTableName()); if (disabling) { - LOG.info("The table " + hri.getTableNameAsString() + LOG.info("The table " + hri.getTableName() + " is disabled. 
Hence not assigning region" + hri.getEncodedName()); return false; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java index 93eebbe..b377754 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; @@ -40,7 +41,7 @@ public class TableAddFamilyHandler extends TableEventHandler { private final HColumnDescriptor familyDesc; - public TableAddFamilyHandler(byte[] tableName, HColumnDescriptor familyDesc, + public TableAddFamilyHandler(TableName tableName, HColumnDescriptor familyDesc, Server server, final MasterServices masterServices) { super(EventType.C_M_ADD_FAMILY, tableName, server, masterServices); this.familyDesc = familyDesc; @@ -82,7 +83,7 @@ public class TableAddFamilyHandler extends TableEventHandler { family = familyDesc.getNameAsString(); } return getClass().getSimpleName() + "-" + name + "-" + - getSeqid() + "-" + tableNameStr + "-" + family; + getSeqid() + "-" + tableName + "-" + family; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java index c6c4794..a4f16e3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; @@ -40,7 +41,7 @@ public class TableDeleteFamilyHandler extends TableEventHandler { private byte [] familyName; - public TableDeleteFamilyHandler(byte[] tableName, byte [] familyName, + public TableDeleteFamilyHandler(TableName tableName, byte [] familyName, Server server, final MasterServices masterServices) throws IOException { super(EventType.C_M_DELETE_FAMILY, tableName, server, masterServices); this.familyName = familyName; @@ -83,6 +84,7 @@ public class TableDeleteFamilyHandler extends TableEventHandler { if(familyName != null) { family = Bytes.toString(familyName); } - return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + tableNameStr + "-" + family; + return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + + "-" + tableName + "-" + family; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java index 20f8130..69fa526 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java @@ -29,6 +29,7 @@ import java.util.TreeMap; import 
org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.InvalidFamilyOperationException; @@ -60,17 +61,15 @@ import com.google.common.collect.Maps; public abstract class TableEventHandler extends EventHandler { private static final Log LOG = LogFactory.getLog(TableEventHandler.class); protected final MasterServices masterServices; - protected final byte [] tableName; - protected final String tableNameStr; + protected final TableName tableName; protected TableLock tableLock; private boolean isPrepareCalled = false; - public TableEventHandler(EventType eventType, byte [] tableName, Server server, + public TableEventHandler(EventType eventType, TableName tableName, Server server, MasterServices masterServices) { super(server, eventType); this.masterServices = masterServices; this.tableName = tableName; - this.tableNameStr = Bytes.toString(this.tableName); } public TableEventHandler prepare() throws IOException { @@ -122,7 +121,7 @@ public abstract class TableEventHandler extends EventHandler { } try { LOG.info("Handling table operation " + eventType + " on table " + - Bytes.toString(tableName)); + tableName); List hris = MetaReader.getTableRegions(this.server.getCatalogTracker(), @@ -130,20 +129,20 @@ public abstract class TableEventHandler extends EventHandler { handleTableOperation(hris); if (eventType.isOnlineSchemaChangeSupported() && this.masterServices. getAssignmentManager().getZKTable(). - isEnabledTable(Bytes.toString(tableName))) { + isEnabledTable(tableName)) { if (reOpenAllRegions(hris)) { LOG.info("Completed table operation " + eventType + " on table " + - Bytes.toString(tableName)); + tableName); } else { LOG.warn("Error on reopening the regions"); } } completed(null); } catch (IOException e) { - LOG.error("Error manipulating table " + Bytes.toString(tableName), e); + LOG.error("Error manipulating table " + tableName, e); completed(e); } catch (KeeperException e) { - LOG.error("Error manipulating table " + Bytes.toString(tableName), e); + LOG.error("Error manipulating table " + tableName, e); completed(e); } finally { releaseTableLock(); @@ -226,11 +225,10 @@ public abstract class TableEventHandler extends EventHandler { */ public HTableDescriptor getTableDescriptor() throws FileNotFoundException, IOException { - final String name = Bytes.toString(tableName); HTableDescriptor htd = - this.masterServices.getTableDescriptors().get(name); + this.masterServices.getTableDescriptors().get(tableName); if (htd == null) { - throw new IOException("HTableDescriptor missing for " + name); + throw new IOException("HTableDescriptor missing for " + tableName); } return htd; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java index a5c1cff..b0dde7a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import 
org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; @@ -38,7 +39,7 @@ import org.apache.hadoop.hbase.master.MasterServices; public class TableModifyFamilyHandler extends TableEventHandler { private final HColumnDescriptor familyDesc; - public TableModifyFamilyHandler(byte[] tableName, + public TableModifyFamilyHandler(TableName tableName, HColumnDescriptor familyDesc, Server server, final MasterServices masterServices) { super(EventType.C_M_MODIFY_FAMILY, tableName, server, masterServices); @@ -76,7 +77,8 @@ public class TableModifyFamilyHandler extends TableEventHandler { if(familyDesc != null) { family = familyDesc.getNameAsString(); } - return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-" + tableNameStr + "-" + family; + return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + + "-" + tableName + "-" + family; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java index ba7dae0..e8e23c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java @@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; @@ -47,6 +48,7 @@ import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import com.google.common.base.Preconditions; +import org.apache.hadoop.hbase.util.FSUtils; /** * Handler to Clone a snapshot. @@ -81,7 +83,7 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot // Monitor this.monitor = new ForeignExceptionDispatcher(); this.status = TaskMonitor.get().createStatus("Cloning snapshot '" + snapshot.getName() + - "' to table " + hTableDescriptor.getNameAsString()); + "' to table " + hTableDescriptor.getTableName()); } @Override @@ -97,17 +99,16 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot */ @Override protected List handleCreateHdfsRegions(final Path tableRootDir, - final String tableName) throws IOException { + final TableName tableName) throws IOException { status.setStatus("Creating regions for table: " + tableName); FileSystem fs = fileSystemManager.getFileSystem(); Path rootDir = fileSystemManager.getRootDir(); - Path tableDir = new Path(tableRootDir, tableName); try { // 1. 
Execute the on-disk Clone Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir); RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(conf, fs, - snapshot, snapshotDir, hTableDescriptor, tableDir, monitor, status); + snapshot, snapshotDir, hTableDescriptor, tableRootDir, monitor, status); RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions(); // Clone operation should not have stuff to restore or remove diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java index 92d0818..5228b24 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java @@ -115,13 +115,13 @@ public class DisabledTableSnapshotHandler extends TakeSnapshotHandler { new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir, snapshotRegionDir).call(); monitor.rethrowException(); status.setStatus("Completed copying recovered edits for offline snapshot of table: " - + snapshot.getTable()); + + snapshotTable); // 2.3 reference all the files in the region new ReferenceRegionHFilesTask(snapshot, monitor, regionDir, fs, snapshotRegionDir).call(); monitor.rethrowException(); status.setStatus("Completed referencing HFiles for offline snapshot of table: " + - snapshot.getTable()); + snapshotTable); } // 3. write the table info to disk @@ -131,14 +131,16 @@ public class DisabledTableSnapshotHandler extends TakeSnapshotHandler { FSUtils.getRootDir(conf)); tableInfoCopyTask.call(); monitor.rethrowException(); - status.setStatus("Finished copying tableinfo for snapshot of table: " + snapshot.getTable()); + status.setStatus("Finished copying tableinfo for snapshot of table: " + + snapshotTable); } catch (Exception e) { // make sure we capture the exception to propagate back to the client later String reason = "Failed snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot) + " due to exception:" + e.getMessage(); ForeignException ee = new ForeignException(reason, e); monitor.receive(ee); - status.abort("Snapshot of table: "+ snapshot.getTable() +" failed because " + e.getMessage()); + status.abort("Snapshot of table: "+ snapshotTable + + " failed because " + e.getMessage()); } finally { LOG.debug("Marking snapshot" + ClientSnapshotDescriptionUtils.toString(snapshot) + " as finished."); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java index 58bea29..1ec8faa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java @@ -23,23 +23,22 @@ import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import 
org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; @@ -79,7 +78,7 @@ public final class MasterSnapshotVerifier { private SnapshotDescription snapshot; private FileSystem fs; private Path rootDir; - private String tableName; + private TableName tableName; private MasterServices services; /** @@ -92,7 +91,7 @@ public final class MasterSnapshotVerifier { this.services = services; this.snapshot = snapshot; this.rootDir = rootDir; - this.tableName = snapshot.getTable(); + this.tableName = TableName.valueOf(snapshot.getTable()); } /** @@ -141,7 +140,7 @@ public final class MasterSnapshotVerifier { */ private void verifyRegions(Path snapshotDir) throws IOException { List regions = MetaReader.getTableRegions(this.services.getCatalogTracker(), - Bytes.toBytes(tableName)); + tableName); for (HRegionInfo region : regions) { // if offline split parent, skip it if (region.isOffline() && (region.isSplit() || region.isSplitParent())) { @@ -189,7 +188,7 @@ public final class MasterSnapshotVerifier { if (columnFamilies == null) return; // setup the suffixes for the snapshot directories - Path tableNameSuffix = new Path(tableName); + Path tableNameSuffix = FSUtils.getTableDir(new Path("./"), tableName); Path regionNameSuffix = new Path(tableNameSuffix, region.getEncodedName()); // get the potential real paths diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java index 1626581..392fe3b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java @@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.catalog.CatalogTracker; @@ -47,7 +48,7 @@ import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; -import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; /** * Handler to Restore a snapshot. 
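Several hunks above (CreateTableHandler, MasterSnapshotVerifier) and the RestoreSnapshotHandler hunks that follow stop building table directories by hand with new Path(rootDir, tableNameString) and go through FSUtils.getTableDir / FSUtils.getNamespaceDir instead, because a table directory now sits inside its namespace directory. A small sketch of the difference, reusing the FSUtils helpers named in the patch, with a made-up root path and table name:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSUtils;

// Sketch only: contrasts the flat pre-namespace path with the helper-derived,
// namespace-aware path. "/hbase" and "ns:orders" are hypothetical values.
public class TableDirSketch {
  public static void main(String[] args) {
    Path rootDir = new Path("/hbase");
    TableName tn = TableName.valueOf("ns", "orders");

    Path flatGuess = new Path(rootDir, tn.getQualifierAsString()); // old-style, root-level layout
    Path nsDir = FSUtils.getNamespaceDir(rootDir, tn.getNamespaceAsString());
    Path tableDir = FSUtils.getTableDir(rootDir, tn);              // lives under nsDir

    System.out.println(flatGuess);
    System.out.println(nsDir);
    System.out.println(tableDir);
  }
}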
@@ -71,7 +72,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho public RestoreSnapshotHandler(final MasterServices masterServices, final SnapshotDescription snapshot, final HTableDescriptor htd, final MetricsMaster metricsMaster) throws IOException { - super(EventType.C_M_RESTORE_SNAPSHOT, htd.getName(), masterServices, masterServices); + super(EventType.C_M_RESTORE_SNAPSHOT, htd.getTableName(), masterServices, masterServices); this.metricsMaster = metricsMaster; // Snapshot information @@ -88,7 +89,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho this.status = TaskMonitor.get().createStatus( "Restoring snapshot '" + snapshot.getName() + "' to table " - + hTableDescriptor.getNameAsString()); + + hTableDescriptor.getTableName()); } public RestoreSnapshotHandler prepare() throws IOException { @@ -109,8 +110,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho CatalogTracker catalogTracker = masterServices.getCatalogTracker(); FileSystem fs = fileSystemManager.getFileSystem(); Path rootDir = fileSystemManager.getRootDir(); - byte[] tableName = hTableDescriptor.getName(); - Path tableDir = HTableDescriptor.getTableDir(rootDir, tableName); + TableName tableName = hTableDescriptor.getTableName(); try { // 1. Update descriptor @@ -121,7 +121,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir); RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper( masterServices.getConfiguration(), fs, - snapshot, snapshotDir, hTableDescriptor, tableDir, monitor, status); + snapshot, snapshotDir, hTableDescriptor, rootDir, monitor, status); RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions(); // 3. Applies changes to .META. @@ -134,7 +134,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho // At this point the restore is complete. Next step is enabling the table. LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) + - " on table=" + Bytes.toString(tableName) + " completed!"); + " on table=" + tableName + " completed!"); } catch (IOException e) { String msg = "restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) + " failed. 
Try re-running the restore command."; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index e2afdf7..de474e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Stoppable; @@ -56,6 +57,7 @@ import org.apache.hadoop.hbase.procedure.Procedure; import org.apache.hadoop.hbase.procedure.ProcedureCoordinator; import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs; import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinatorRpcs; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; @@ -68,7 +70,6 @@ import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; import org.apache.hadoop.hbase.snapshot.SnapshotExistsException; import org.apache.hadoop.hbase.snapshot.TablePartiallyOpenException; import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; @@ -143,13 +144,15 @@ public class SnapshotManager implements Stoppable { // The map is always accessed and modified under the object lock using synchronized. // snapshotTable() will insert an Handler in the table. // isSnapshotDone() will remove the handler requested if the operation is finished. - private Map snapshotHandlers = new HashMap(); + private Map snapshotHandlers = + new HashMap(); // Restore Sentinels map, with table name as key. // The map is always accessed and modified under the object lock using synchronized. // restoreSnapshot()/cloneSnapshot() will insert an Handler in the table. // isRestoreDone() will remove the handler requested if the operation is finished. - private Map restoreHandlers = new HashMap(); + private Map restoreHandlers = + new HashMap(); private final Path rootDir; private final ExecutorService executorService; @@ -378,7 +381,7 @@ public class SnapshotManager implements Stoppable { * @param tableName name of the table being snapshotted. * @return true if there is a snapshot in progress on the specified table. 
*/ - synchronized boolean isTakingSnapshot(final String tableName) { + synchronized boolean isTakingSnapshot(final TableName tableName) { SnapshotSentinel handler = this.snapshotHandlers.get(tableName); return handler != null && !handler.isFinished(); } @@ -393,10 +396,12 @@ public class SnapshotManager implements Stoppable { throws HBaseSnapshotException { FileSystem fs = master.getMasterFileSystem().getFileSystem(); Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir); + TableName snapshotTable = + TableName.valueOf(snapshot.getTable()); // make sure we aren't already running a snapshot - if (isTakingSnapshot(snapshot.getTable())) { - SnapshotSentinel handler = this.snapshotHandlers.get(snapshot.getTable()); + if (isTakingSnapshot(snapshotTable)) { + SnapshotSentinel handler = this.snapshotHandlers.get(snapshotTable); throw new SnapshotCreationException("Rejected taking " + ClientSnapshotDescriptionUtils.toString(snapshot) + " because we are already running another snapshot " @@ -404,8 +409,8 @@ public class SnapshotManager implements Stoppable { } // make sure we aren't running a restore on the same table - if (isRestoringTable(snapshot.getTable())) { - SnapshotSentinel handler = restoreHandlers.get(snapshot.getTable()); + if (isRestoringTable(snapshotTable)) { + SnapshotSentinel handler = restoreHandlers.get(snapshotTable); throw new SnapshotCreationException("Rejected taking " + ClientSnapshotDescriptionUtils.toString(snapshot) + " because we are already have a restore in progress on the same snapshot " @@ -478,7 +483,7 @@ public class SnapshotManager implements Stoppable { try { handler.prepare(); this.executorService.submit(handler); - this.snapshotHandlers.put(snapshot.getTable(), handler); + this.snapshotHandlers.put(TableName.valueOf(snapshot.getTable()), handler); } catch (Exception e) { // cleanup the working directory by trying to delete it from the fs. 
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir); @@ -518,7 +523,8 @@ public class SnapshotManager implements Stoppable { // check to see if the table exists HTableDescriptor desc = null; try { - desc = master.getTableDescriptors().get(snapshot.getTable()); + desc = master.getTableDescriptors().get( + TableName.valueOf(snapshot.getTable())); } catch (FileNotFoundException e) { String msg = "Table:" + snapshot.getTable() + " info doesn't exist!"; LOG.error(msg); @@ -543,14 +549,15 @@ public class SnapshotManager implements Stoppable { } // if the table is enabled, then have the RS run actually the snapshot work + TableName snapshotTable = TableName.valueOf(snapshot.getTable()); AssignmentManager assignmentMgr = master.getAssignmentManager(); - if (assignmentMgr.getZKTable().isEnabledTable(snapshot.getTable())) { + if (assignmentMgr.getZKTable().isEnabledTable(snapshotTable)) { LOG.debug("Table enabled, starting distributed snapshot."); snapshotEnabledTable(snapshot); LOG.debug("Started snapshot: " + ClientSnapshotDescriptionUtils.toString(snapshot)); } // For disabled table, snapshot is created by the master - else if (assignmentMgr.getZKTable().isDisabledTable(snapshot.getTable())) { + else if (assignmentMgr.getZKTable().isDisabledTable(snapshotTable)) { LOG.debug("Table is disabled, running snapshot entirely on master."); snapshotDisabledTable(snapshot); LOG.debug("Started snapshot: " + ClientSnapshotDescriptionUtils.toString(snapshot)); @@ -577,7 +584,8 @@ public class SnapshotManager implements Stoppable { * * TODO get rid of this if possible, repackaging, modify tests. */ - public synchronized void setSnapshotHandlerForTesting(final String tableName, + public synchronized void setSnapshotHandlerForTesting( + final TableName tableName, final SnapshotSentinel handler) { if (handler != null) { this.snapshotHandlers.put(tableName, handler); @@ -607,7 +615,6 @@ public class SnapshotManager implements Stoppable { try { final Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir); FileSystem fs = master.getMasterFileSystem().getFileSystem(); - // check to see if the snapshot already exists return fs.exists(snapshotDir); } catch (IllegalArgumentException iae) { @@ -624,7 +631,7 @@ public class SnapshotManager implements Stoppable { */ synchronized void cloneSnapshot(final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor) throws HBaseSnapshotException { - String tableName = hTableDescriptor.getNameAsString(); + TableName tableName = hTableDescriptor.getTableName(); // make sure we aren't running a snapshot on the same table if (isTakingSnapshot(tableName)) { @@ -669,16 +676,19 @@ public class SnapshotManager implements Stoppable { SnapshotDescription fsSnapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); HTableDescriptor snapshotTableDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir); - String tableName = reqSnapshot.getTable(); + TableName tableName = TableName.valueOf(reqSnapshot.getTable()); // stop tracking "abandoned" handlers cleanupSentinels(); // Execute the restore/clone operation if (MetaReader.tableExists(master.getCatalogTracker(), tableName)) { - if (master.getAssignmentManager().getZKTable().isEnabledTable(fsSnapshot.getTable())) { + if (master.getAssignmentManager().getZKTable().isEnabledTable( + TableName.valueOf(fsSnapshot.getTable()))) { throw new UnsupportedOperationException("Table '" + - fsSnapshot.getTable() + "' must be disabled in order to perform a 
restore operation."); + TableName.valueOf(fsSnapshot.getTable()) + "' must be disabled in order to " + + "perform a restore operation" + + "."); } // call coproc pre hook @@ -692,8 +702,7 @@ public class SnapshotManager implements Stoppable { cpHost.postRestoreSnapshot(reqSnapshot, snapshotTableDesc); } } else { - HTableDescriptor htd = RestoreSnapshotHelper.cloneTableSchema(snapshotTableDesc, - Bytes.toBytes(tableName)); + HTableDescriptor htd = RestoreSnapshotHelper.cloneTableSchema(snapshotTableDesc, tableName); if (cpHost != null) { cpHost.preCloneSnapshot(reqSnapshot, htd); } @@ -715,7 +724,7 @@ public class SnapshotManager implements Stoppable { */ private synchronized void restoreSnapshot(final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor) throws HBaseSnapshotException { - String tableName = hTableDescriptor.getNameAsString(); + TableName tableName = hTableDescriptor.getTableName(); // make sure we aren't running a snapshot on the same table if (isTakingSnapshot(tableName)) { @@ -731,7 +740,7 @@ public class SnapshotManager implements Stoppable { RestoreSnapshotHandler handler = new RestoreSnapshotHandler(master, snapshot, hTableDescriptor, metricsMaster).prepare(); this.executorService.submit(handler); - restoreHandlers.put(hTableDescriptor.getNameAsString(), handler); + restoreHandlers.put(tableName, handler); } catch (Exception e) { String msg = "Couldn't restore the snapshot=" + ClientSnapshotDescriptionUtils.toString( snapshot) + @@ -747,7 +756,7 @@ public class SnapshotManager implements Stoppable { * @param tableName table under restore * @return true if there is a restore in progress of the specified table. */ - private synchronized boolean isRestoringTable(final String tableName) { + private synchronized boolean isRestoringTable(final TableName tableName) { SnapshotSentinel sentinel = this.restoreHandlers.get(tableName); return(sentinel != null && !sentinel.isFinished()); } @@ -774,7 +783,8 @@ public class SnapshotManager implements Stoppable { } LOG.debug("Verify snapshot=" + snapshot.getName() + " against=" - + sentinel.getSnapshot().getName() + " table=" + snapshot.getTable()); + + sentinel.getSnapshot().getName() + " table=" + + TableName.valueOf(snapshot.getTable())); // If the restore is failed, rethrow the exception sentinel.rethrowExceptionIfFailed(); @@ -801,8 +811,14 @@ public class SnapshotManager implements Stoppable { * @return null if doesn't match, else a live handler. */ private synchronized SnapshotSentinel removeSentinelIfFinished( - final Map sentinels, final SnapshotDescription snapshot) { - SnapshotSentinel h = sentinels.get(snapshot.getTable()); + final Map sentinels, + final SnapshotDescription snapshot) { + if (!snapshot.hasTable()) { + return null; + } + + TableName snapshotTable = TableName.valueOf(snapshot.getTable()); + SnapshotSentinel h = sentinels.get(snapshotTable); if (h == null) { return null; } @@ -814,7 +830,7 @@ public class SnapshotManager implements Stoppable { // Remove from the "in-progress" list once completed if (h.isFinished()) { - sentinels.remove(snapshot.getTable()); + sentinels.remove(snapshotTable); } return h; @@ -837,11 +853,12 @@ public class SnapshotManager implements Stoppable { * has exceeded the removal timeout. 
* @param sentinels map of sentinels to clean */ - private synchronized void cleanupSentinels(final Map sentinels) { + private synchronized void cleanupSentinels(final Map sentinels) { long currentTime = EnvironmentEdgeManager.currentTimeMillis(); - Iterator> it = sentinels.entrySet().iterator(); + Iterator> it = + sentinels.entrySet().iterator(); while (it.hasNext()) { - Map.Entry entry = it.next(); + Map.Entry entry = it.next(); SnapshotSentinel sentinel = entry.getValue(); if (sentinel.isFinished() && (currentTime - sentinel.getCompletionTimestamp()) > SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java index b5fd8b0..91cd8bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java @@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; @@ -46,12 +47,12 @@ import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.TableInfoCopyTask; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.zookeeper.KeeperException; @@ -83,6 +84,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh protected final TableLockManager tableLockManager; protected final TableLock tableLock; protected final MonitoredTask status; + protected final TableName snapshotTable; /** * @param snapshot descriptor of the snapshot to take @@ -97,6 +99,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh this.master = masterServices; this.metricsMaster = metricsMaster; this.snapshot = snapshot; + this.snapshotTable = TableName.valueOf(snapshot.getTable()); this.conf = this.master.getConfiguration(); this.fs = this.master.getMasterFileSystem().getFileSystem(); this.rootDir = this.master.getMasterFileSystem().getRootDir(); @@ -105,23 +108,23 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh this.monitor = new ForeignExceptionDispatcher(snapshot.getName()); this.tableLockManager = master.getTableLockManager(); - this.tableLock = this.tableLockManager.writeLock(Bytes.toBytes(snapshot.getTable()) - , EventType.C_M_SNAPSHOT_TABLE.toString()); + this.tableLock = this.tableLockManager.writeLock( + snapshotTable, + EventType.C_M_SNAPSHOT_TABLE.toString()); // prepare the verify this.verifier = new MasterSnapshotVerifier(masterServices, snapshot, rootDir); // update the running tasks this.status = 
TaskMonitor.get().createStatus( - "Taking " + snapshot.getType() + " snapshot on table: " + snapshot.getTable()); + "Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable); } private HTableDescriptor loadTableDescriptor() throws FileNotFoundException, IOException { - final String name = snapshot.getTable(); HTableDescriptor htd = - this.master.getTableDescriptors().get(name); + this.master.getTableDescriptors().get(snapshotTable); if (htd == null) { - throw new IOException("HTableDescriptor missing for " + name); + throw new IOException("HTableDescriptor missing for " + snapshotTable); } return htd; } @@ -150,7 +153,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh @Override public void process() { String msg = "Running " + snapshot.getType() + " table snapshot " + snapshot.getName() + " " - + eventType + " on table " + snapshot.getTable(); + + eventType + " on table " + snapshotTable; LOG.info(msg); status.setStatus(msg); try { @@ -164,7 +167,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh List> regionsAndLocations = MetaReader.getTableRegionsAndLocations(this.server.getCatalogTracker(), - Bytes.toBytes(snapshot.getTable()), true); + snapshotTable, true); // run the snapshot snapshotRegions(regionsAndLocations); @@ -182,12 +185,12 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh // complete the snapshot, atomically moving from tmp to .snapshot dir. completeSnapshot(this.snapshotDir, this.workingDir, this.fs); - status.markComplete("Snapshot " + snapshot.getName() + " of table " + snapshot.getTable() + status.markComplete("Snapshot " + snapshot.getName() + " of table " + snapshotTable + " completed"); metricsMaster.addSnapshot(status.getCompletionTimestamp() - status.getStartTime()); } catch (Exception e) { status.abort("Failed to complete snapshot " + snapshot.getName() + " on table " + - snapshot.getTable() + " because " + e.getMessage()); + snapshotTable + " because " + e.getMessage()); String reason = "Failed taking snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot) + " due to exception:" + e.getMessage(); LOG.error(reason, e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java new file mode 100644 index 0000000..def4eba --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java @@ -0,0 +1,219 @@ +/** + * The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.migration; + +import com.google.common.collect.Lists; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.util.Tool; + +import java.io.IOException; +import java.util.List; + +/** + * Upgrades old 0.94 filesystem layout to namespace layout + * Does the following: + * + * - creates system namespace directory and move .META. table there + * renaming .META. table to hbase:meta, + * this in turn would require to re-encode the region directory name + */ +public class NamespaceUpgrade implements Tool { + private static final Log LOG = LogFactory.getLog(NamespaceUpgrade.class); + + private Configuration conf; + + private FileSystem fs; + + private Path rootDir; + private Path sysNsDir; + private Path defNsDir; + private Path baseDirs[]; + + public NamespaceUpgrade() throws IOException { + } + + public void init() throws IOException { + this.rootDir = FSUtils.getRootDir(conf); + this.fs = FileSystem.get(conf); + sysNsDir = FSUtils.getNamespaceDir(rootDir, NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); + defNsDir = FSUtils.getNamespaceDir(rootDir, NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR); + baseDirs = new Path[]{rootDir, + new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY), + new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY)}; + } + + + public void upgradeTableDirs() + throws IOException, DeserializationException { + + + //if new version is written then upgrade is done + if (verifyNSUpgrade(fs, rootDir)) { + return; + } + + makeNamespaceDirs(); + + migrateTables(); + + migrateSnapshots(); + + migrateMeta(); + + FSUtils.setVersion(fs, rootDir); + } + + public void makeNamespaceDirs() throws IOException { + if (!fs.exists(sysNsDir)) { + if (!fs.mkdirs(sysNsDir)) { + throw new IOException("Failed to create system namespace dir: " + sysNsDir); + } + } + if (!fs.exists(defNsDir)) { + if (!fs.mkdirs(defNsDir)) { + throw new IOException("Failed to create default namespace dir: " + defNsDir); + } + } + } + + public void migrateTables() throws IOException { + List sysTables = Lists.newArrayList("-ROOT-",".META."); + + //migrate tables including archive and tmp + for(Path baseDir: baseDirs) { + List oldTableDirs = FSUtils.getLocalTableDirs(fs, baseDir); + for(Path oldTableDir: oldTableDirs) { + if (!sysTables.contains(oldTableDir.getName())) { + Path nsDir = FSUtils.getTableDir(baseDir, + TableName.valueOf(oldTableDir.getName())); + if(!fs.exists(nsDir.getParent())) { + if(!fs.mkdirs(nsDir.getParent())) { + throw new IOException("Failed to create namespace dir "+nsDir.getParent()); + } + } + if (sysTables.indexOf(oldTableDir.getName()) < 0) { + LOG.info("Migrating table " + oldTableDir.getName() + " to " + nsDir); + if (!fs.rename(oldTableDir, nsDir)) { + throw new IOException("Failed to move "+oldTableDir+" to namespace dir "+nsDir); + } + } + } + } + } + } + + public void migrateSnapshots() throws IOException { + //migrate 
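// --------------------------------------------------------------------------
// Sketch of the directory layout change this upgrade performs (illustrative
// paths; "default" and "hbase" are assumed to be the default and system
// namespace names):
//   0.94 layout:       ${hbase.rootdir}/myTable
//   namespace layout:  ${hbase.rootdir}/data/default/myTable
//   .META.          -> ${hbase.rootdir}/data/hbase/meta  (TableName.META_TABLE_NAME)
// A user table's new location is computed the same way migrateTables() does,
// with a hypothetical table name:
Path newDir = FSUtils.getTableDir(rootDir, TableName.valueOf("myTable"));
// --------------------------------------------------------------------------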
snapshot dir + Path oldSnapshotDir = new Path(rootDir, HConstants.OLD_SNAPSHOT_DIR_NAME); + Path newSnapshotDir = new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME); + if (fs.exists(oldSnapshotDir)) { + boolean foundOldSnapshotDir = false; + // Logic to verify old snapshot dir culled from SnapshotManager + // ignore all the snapshots in progress + FileStatus[] snapshots = fs.listStatus(oldSnapshotDir, + new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs)); + // loop through all the completed snapshots + for (FileStatus snapshot : snapshots) { + Path info = new Path(snapshot.getPath(), SnapshotDescriptionUtils.SNAPSHOTINFO_FILE); + // if the snapshot is bad + if (fs.exists(info)) { + foundOldSnapshotDir = true; + break; + } + } + if(foundOldSnapshotDir) { + LOG.info("Migrating snapshot dir"); + if (!fs.rename(oldSnapshotDir, newSnapshotDir)) { + throw new IOException("Failed to move old snapshot dir "+ + oldSnapshotDir+" to new "+newSnapshotDir); + } + } + } + } + + public void migrateMeta() throws IOException { + Path newMetaRegionDir = HRegion.getRegionDir(rootDir, HRegionInfo.FIRST_META_REGIONINFO); + Path newMetaDir = FSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME); + Path oldMetaDir = new Path(rootDir, ".META."); + if (fs.exists(oldMetaDir)) { + LOG.info("Migrating meta table " + oldMetaDir.getName() + " to " + newMetaDir); + if (!fs.rename(oldMetaDir, newMetaDir)) { + throw new IOException("Failed to migrate meta table " + + oldMetaDir.getName() + " to " + newMetaDir); + } + } + + //since meta table name has changed + //rename meta region dir from it's old encoding to new one + Path oldMetaRegionDir = HRegion.getRegionDir(rootDir, + new Path(newMetaDir, "1028785192").toString()); + if (fs.exists(oldMetaRegionDir)) { + LOG.info("Migrating meta region " + oldMetaRegionDir + " to " + newMetaRegionDir); + if (!fs.rename(oldMetaRegionDir, newMetaRegionDir)) { + throw new IOException("Failed to migrate meta region " + + oldMetaRegionDir + " to " + newMetaRegionDir); + } + } + } + + public static boolean verifyNSUpgrade(FileSystem fs, Path rootDir) + throws IOException { + try { + return FSUtils.getVersion(fs, rootDir).equals(HConstants.FILE_SYSTEM_VERSION); + } catch (DeserializationException e) { + throw new IOException("Failed to verify namespace upgrade", e); + } + } + + + @Override + public int run(String[] args) throws Exception { + if(args.length < 1 || !args[0].equals("--upgrade")) { + System.out.println("Usage: --upgrade"); + return 0; + } + init(); + upgradeTableDirs(); + return 0; + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + @Override + public Configuration getConf() { + return conf; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java index 0bcd535..eb93bd3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java @@ -119,7 +119,7 @@ public class ReplicationProtbufUtil { HLogKey key = entry.getKey(); keyBuilder.setEncodedRegionName( ByteString.copyFrom(key.getEncodedRegionName())); - keyBuilder.setTableName(ByteString.copyFrom(key.getTablename())); + keyBuilder.setTableName(ByteString.copyFrom(key.getTablename().getName())); keyBuilder.setLogSequenceNumber(key.getLogSeqNum()); keyBuilder.setWriteTime(key.getWriteTime()); UUID clusterId = 
key.getClusterId(); @@ -190,4 +190,4 @@ public class ReplicationProtbufUtil { } }; } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index e0841d3..f5f67c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -153,7 +153,7 @@ public class CompactionTool extends Configured implements Tool { final HRegionInfo hri, final String familyName, final boolean compactOnce, final boolean major) throws IOException { HStore store = getStore(conf, fs, tableDir, htd, hri, familyName, tmpDir); - LOG.info("Compact table=" + htd.getNameAsString() + + LOG.info("Compact table=" + htd.getTableName() + " region=" + hri.getRegionNameAsString() + " family=" + familyName); if (major) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java index 6082c52..78d5d79 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DelimitedKeyPrefixRegionSplitPolicy.java @@ -57,7 +57,7 @@ public class DelimitedKeyPrefixRegionSplitPolicy extends IncreasingToUpperBoundR DELIMITER_KEY); if (delimiterString == null || delimiterString.length() == 0) { LOG.error(DELIMITER_KEY + " not specified for table " - + region.getTableDesc().getNameAsString() + + region.getTableDesc().getTableName() + ". Using default RegionSplitPolicy"); return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 9939f6a..9e3f7c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -73,6 +73,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionTooBusyException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; import org.apache.hadoop.hbase.HDFSBlocksDistribution; @@ -90,7 +91,6 @@ import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare; @@ -744,7 +744,7 @@ public class HRegion implements HeapSize { // , Writable{ public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf, final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo) throws IOException { HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); - Path tablePath = FSUtils.getTablePath(FSUtils.getRootDir(conf), tableDescriptor.getName()); + Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), 
tableDescriptor.getTableName()); FileSystem fs = tablePath.getFileSystem(conf); HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo); @@ -2204,7 +2204,7 @@ public class HRegion implements HeapSize { // , Writable{ // ------------------------- Mutation mutation = batchOp.operations[firstIndex]; if (walEdit.size() > 0) { - txid = this.log.appendNoSync(this.getRegionInfo(), this.htableDescriptor.getName(), + txid = this.log.appendNoSync(this.getRegionInfo(), this.htableDescriptor.getTableName(), walEdit, mutation.getClusterId(), now, this.htableDescriptor); } @@ -3914,11 +3914,11 @@ public class HRegion implements HeapSize { // , Writable{ final HLog hlog, final boolean initialize, final boolean ignoreHLog) throws IOException { - LOG.info("creating HRegion " + info.getTableNameAsString() + LOG.info("creating HRegion " + info.getTableName().getNameAsString() + " HTD == " + hTableDescriptor + " RootDir = " + rootDir + - " Table name == " + info.getTableNameAsString()); + " Table name == " + info.getTableName().getNameAsString()); - Path tableDir = HTableDescriptor.getTableDir(rootDir, info.getTableName()); + Path tableDir = FSUtils.getTableDir(rootDir, info.getTableName()); FileSystem fs = FileSystem.get(conf); HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, info); HLog effectiveHLog = hlog; @@ -4075,8 +4075,10 @@ public class HRegion implements HeapSize { // , Writable{ final RegionServerServices rsServices, final CancelableProgressable reporter) throws IOException { if (info == null) throw new NullPointerException("Passed region info is null"); - LOG.info("Open " + info); - Path dir = HTableDescriptor.getTableDir(rootDir, info.getTableName()); + if (LOG.isDebugEnabled()) { + LOG.debug("Opening region: " + info); + } + Path dir = FSUtils.getTableDir(rootDir, info.getTableName()); HRegion r = HRegion.newHRegion(dir, wal, fs, conf, info, htd, rsServices); return r.openHRegion(reporter); } @@ -4205,7 +4207,7 @@ public class HRegion implements HeapSize { // , Writable{ @Deprecated public static Path getRegionDir(final Path rootdir, final HRegionInfo info) { return new Path( - HTableDescriptor.getTableDir(rootdir, info.getTableName()), + FSUtils.getTableDir(rootdir, info.getTableName()), info.getEncodedName()); } @@ -4265,8 +4267,8 @@ public class HRegion implements HeapSize { // , Writable{ * @throws IOException */ public static HRegion merge(final HRegion a, final HRegion b) throws IOException { - if (!a.getRegionInfo().getTableNameAsString().equals( - b.getRegionInfo().getTableNameAsString())) { + if (!a.getRegionInfo().getTableName().equals( + b.getRegionInfo().getTableName())) { throw new IOException("Regions do not belong to the same table"); } @@ -4522,7 +4524,7 @@ public class HRegion implements HeapSize { // , Writable{ // 7. Append no sync if (!walEdit.isEmpty()) { txid = this.log.appendNoSync(this.getRegionInfo(), - this.htableDescriptor.getName(), walEdit, + this.htableDescriptor.getTableName(), walEdit, processor.getClusterId(), now, this.htableDescriptor); } // 8. Release region lock @@ -4749,7 +4751,7 @@ public class HRegion implements HeapSize { // , Writable{ // Using default cluster id, as this can only happen in the orginating // cluster. A slave cluster receives the final value (not the delta) // as a Put. 
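// --------------------------------------------------------------------------
// Minimal sketch (illustrative values only, not part of this patch): TableName
// gives table identity value semantics, so the byte[]-based Bytes.equals()
// checks and comparator-keyed maps used before this change reduce to plain
// equals() and natural ordering, as the TreeMap/TreeSet usages elsewhere in
// this patch rely on.
TableName t = TableName.valueOf("someTable");                 // hypothetical user table
boolean sameTable = t.equals(TableName.valueOf("someTable")); // true
boolean isMeta = TableName.META_TABLE_NAME.equals(t);         // false
Map<TableName, HTableDescriptor> byTable = new TreeMap<TableName, HTableDescriptor>();
byTable.put(t, null); // no Bytes.BYTES_COMPARATOR needed; TableName sorts naturally
// --------------------------------------------------------------------------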
- txid = this.log.appendNoSync(this.getRegionInfo(), this.htableDescriptor.getName(), + txid = this.log.appendNoSync(this.getRegionInfo(), this.htableDescriptor.getTableName(), walEdits, HConstants.DEFAULT_CLUSTER_ID, EnvironmentEdgeManager.currentTimeMillis(), this.htableDescriptor); } else { @@ -4899,7 +4901,7 @@ public class HRegion implements HeapSize { // , Writable{ // Using default cluster id, as this can only happen in the orginating // cluster. A slave cluster receives the final value (not the delta) // as a Put. - txid = this.log.appendNoSync(this.getRegionInfo(), this.htableDescriptor.getName(), + txid = this.log.appendNoSync(this.getRegionInfo(), this.htableDescriptor.getTableName(), walEdits, HConstants.DEFAULT_CLUSTER_ID, EnvironmentEdgeManager.currentTimeMillis(), this.htableDescriptor); } else { @@ -5117,10 +5119,9 @@ public class HRegion implements HeapSize { // , Writable{ final HLog log, final Configuration c, final boolean majorCompact) throws IOException { - HRegion region; - String metaStr = Bytes.toString(HConstants.META_TABLE_NAME); + HRegion region = null; // Currently expects tables have one region only. - if (p.getName().startsWith(metaStr)) { + if (FSUtils.getTableName(p).equals(TableName.META_TABLE_NAME)) { region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.FIRST_META_REGIONINFO, HTableDescriptor.META_TABLEDESC, null); } else { @@ -5188,7 +5189,8 @@ public class HRegion implements HeapSize { // , Writable{ */ public byte[] checkSplit() { // Can't split META - if (this.getRegionInfo().isMetaTable()) { + if (this.getRegionInfo().isMetaTable() || + TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTableName())) { if (shouldForceSplit()) { LOG.warn("Cannot split meta region in HBase 0.20 and above"); } @@ -5491,7 +5493,7 @@ public class HRegion implements HeapSize { // , Writable{ final Configuration c = HBaseConfiguration.create(); final FileSystem fs = FileSystem.get(c); final Path logdir = new Path(c.get("hbase.tmp.dir")); - final String logname = "hlog" + tableDir.getName() + final String logname = "hlog" + FSUtils.getTableName(tableDir) + EnvironmentEdgeManager.currentTimeMillis(); final HLog log = HLogFactory.createHLog(fs, logdir, logname, c); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 3c44147..114da83 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -59,6 +59,8 @@ import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.Chore; +import org.apache.hadoop.hbase.DaemonThreadFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; @@ -2308,13 +2310,13 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa * @param tableName * @return Online regions from tableName */ - @Override - public List getOnlineRegions(byte[] tableName) { + @Override + public List getOnlineRegions(TableName tableName) { List tableRegions = new ArrayList(); synchronized (this.onlineRegions) { for (HRegion region: this.onlineRegions.values()) { HRegionInfo regionInfo = region.getRegionInfo(); - 
if(Bytes.equals(regionInfo.getTableName(), tableName)) { + if(regionInfo.getTableName().equals(tableName)) { tableRegions.add(region); } } @@ -3443,7 +3445,8 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa requestCount.increment(); OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder(); final int regionCount = request.getOpenInfoCount(); - final Map htds = new HashMap(regionCount); + final Map htds = + new HashMap(regionCount); final boolean isBulkAssign = regionCount > 1; for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) { final HRegionInfo region = HRegionInfo.convert(regionOpenInfo.getRegion()); @@ -3485,10 +3488,10 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa } } LOG.info("Open " + region.getRegionNameAsString()); - htd = htds.get(region.getTableNameAsString()); + htd = htds.get(region.getTableName()); if (htd == null) { htd = this.tableDescriptors.get(region.getTableName()); - htds.put(region.getTableNameAsString(), htd); + htds.put(region.getTableName(), htd); } final Boolean previous = this.regionsInTransitionInRS.putIfAbsent( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 5fa086e..1cb57bd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -48,6 +48,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CompoundConfiguration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -278,8 +279,8 @@ public class HStore implements Store { } @Override - public String getTableName() { - return this.getRegionInfo().getTableNameAsString(); + public TableName getTableName() { + return this.getRegionInfo().getTableName(); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java index b623e14..64ab4f3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java @@ -23,9 +23,9 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.util.Bytes; /** * Split size is the number of regions that are on this server that all are @@ -105,13 +105,13 @@ extends ConstantSizeRegionSplitPolicy { RegionServerServices rss = this.region.getRegionServerServices(); // Can be null in tests if (rss == null) return 0; - byte [] tablename = this.region.getTableDesc().getName(); + TableName tablename = this.region.getTableDesc().getTableName(); int tableRegionsCount = 0; try { List hri = rss.getOnlineRegions(tablename); tableRegionsCount = hri == null || hri.isEmpty()? 
0: hri.size(); } catch (IOException e) { - LOG.debug("Failed getOnlineRegions " + Bytes.toString(tablename), e); + LOG.debug("Failed getOnlineRegions " + tablename, e); } return tableRegionsCount; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyPrefixRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyPrefixRegionSplitPolicy.java index 3548a62..530b235 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyPrefixRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyPrefixRegionSplitPolicy.java @@ -54,7 +54,7 @@ public class KeyPrefixRegionSplitPolicy extends IncreasingToUpperBoundRegionSpli prefixLengthString = region.getTableDesc().getValue(PREFIX_LENGTH_KEY_DEPRECATED); if (prefixLengthString == null) { LOG.error(PREFIX_LENGTH_KEY + " not specified for table " - + region.getTableDesc().getNameAsString() + + region.getTableDesc().getTableName() + ". Using default RegionSplitPolicy"); return; } @@ -66,7 +66,7 @@ public class KeyPrefixRegionSplitPolicy extends IncreasingToUpperBoundRegionSpli } if (prefixLength <= 0) { LOG.error("Invalid value for " + PREFIX_LENGTH_KEY + " for table " - + region.getTableDesc().getNameAsString() + ":" + + region.getTableDesc().getTableName() + ":" + prefixLengthString + ". Using default RegionSplitPolicy"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java index 9c9c577..146ed3a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java @@ -57,7 +57,7 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable if (tableDesc == null) { return ""; } - return tableDesc.getNameAsString(); + return tableDesc.getTableName().getNameAsString(); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java index 52dcda0..74d98e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; @@ -62,5 +63,5 @@ interface OnlineRegions extends Server { * @return List of HRegion * @throws java.io.IOException */ - List getOnlineRegions(byte[] tableName) throws IOException; + List getOnlineRegions(TableName tableName) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 4fe11f4..03d9c5f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -147,7 +147,7 @@ public class RegionCoprocessorHost loadSystemCoprocessors(conf, REGION_COPROCESSOR_CONF_KEY); // load system default cp's for user tables from 
configuration. - if (!HTableDescriptor.isMetaTable(region.getRegionInfo().getTableName())) { + if (!HTableDescriptor.isSystemTable(region.getRegionInfo().getTableName())) { loadSystemCoprocessors(conf, USER_REGION_COPROCESSOR_CONF_KEY); } @@ -195,7 +195,7 @@ public class RegionCoprocessorHost configured.add(load(path, className, priority, conf)); } LOG.info("Load coprocessor " + className + " from HTD of " + - Bytes.toString(region.getTableDesc().getName()) + + region.getTableDesc().getTableName().getNameAsString() + " successfully."); } else { throw new RuntimeException("specification does not match pattern"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java index 084576d..ef0c461 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java @@ -70,7 +70,7 @@ class RegionMergeRequest implements Runnable { //acquire a shared read lock on the table, so that table schema modifications //do not happen concurrently - tableLock = server.getTableLockManager().readLock(region_a.getTableDesc().getName() + tableLock = server.getTableLockManager().readLock(region_a.getTableDesc().getTableName() , "MERGE_REGIONS:" + region_a.getRegionNameAsString() + ", " + region_b.getRegionNameAsString()); try { tableLock.acquire(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java index 59f94c0..23feb57 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java @@ -163,8 +163,8 @@ public class RegionMergeTransaction { * false if they are not (e.g. its already closed, etc.). 
*/ public boolean prepare(final RegionServerServices services) { - if (!region_a.getTableDesc().getNameAsString() - .equals(region_b.getTableDesc().getNameAsString())) { + if (!region_a.getTableDesc().getTableName() + .equals(region_b.getTableDesc().getTableName())) { LOG.info("Can't merge regions " + region_a + "," + region_b + " because they do not belong to the same table"); return false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java index adb8d82..f5ab54d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java @@ -121,7 +121,7 @@ public abstract class RegionSplitPolicy extends Configured { } catch (Exception e) { throw new IOException( "Unable to load configured region split policy '" + - className + "' for table '" + htd.getNameAsString() + "'", + className + "' for table '" + htd.getTableName() + "'", e); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java index 2d757f6..7deb220 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java @@ -66,7 +66,7 @@ class SplitRequest implements Runnable { //acquire a shared read lock on the table, so that table schema modifications //do not happen concurrently - tableLock = server.getTableLockManager().readLock(parent.getTableDesc().getName() + tableLock = server.getTableLockManager().readLock(parent.getTableDesc().getTableName() , "SPLIT_REGION:" + parent.getRegionNameAsString()); try { tableLock.acquire(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java index 4f883a3..7c8af12 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java @@ -34,7 +34,6 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.RegionTransition; import org.apache.hadoop.hbase.Server; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index 79454fd..bcd82ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; @@ -327,7 +328,7 @@ public interface Store extends HeapSize, StoreConfigInformation { String getColumnFamilyName(); - String getTableName(); 
+ TableName getTableName(); /* * @param o Observer who wants to know about changes in set of Readers diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java index 233efb0..3a1a543 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java @@ -262,7 +262,7 @@ public class StoreFileInfo { /* * Return path to the file referred to by a Reference. Presumes a directory - * hierarchy of ${hbase.rootdir}/tablename/regionname/familyname. + * hierarchy of ${hbase.rootdir}/data/${namespace}/tablename/regionname/familyname. * @param p Path to a Reference file. * @return Calculated path to parent region file. * @throws IllegalArgumentException when path regex fails to match. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index 60e23df..74da203 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -35,6 +35,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DaemonThreadFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.master.snapshot.MasterSnapshotVerifier; @@ -44,12 +45,12 @@ import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs; import org.apache.hadoop.hbase.procedure.Subprocedure; import org.apache.hadoop.hbase.procedure.SubprocedureFactory; import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -225,8 +226,7 @@ public class RegionServerSnapshotManager { * @throws IOException */ private List getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException { - byte[] table = Bytes.toBytes(snapshot.getTable()); - return rss.getOnlineRegions(table); + return rss.getOnlineRegions(TableName.valueOf(snapshot.getTable())); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 0ea32db..77100c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -49,6 +49,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import 
org.apache.hadoop.fs.Syncable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -821,20 +822,20 @@ class FSHLog implements HLog, Syncable { * @param clusterId * @return New log key. */ - protected HLogKey makeKey(byte[] encodedRegionName, byte[] tableName, long seqnum, + protected HLogKey makeKey(byte[] encodedRegionName, TableName tableName, long seqnum, long now, UUID clusterId) { return new HLogKey(encodedRegionName, tableName, seqnum, now, clusterId); } @Override - public void append(HRegionInfo info, byte [] tableName, WALEdit edits, + public void append(HRegionInfo info, TableName tableName, WALEdit edits, final long now, HTableDescriptor htd) throws IOException { append(info, tableName, edits, now, htd, true); } @Override - public void append(HRegionInfo info, byte [] tableName, WALEdit edits, + public void append(HRegionInfo info, TableName tableName, WALEdit edits, final long now, HTableDescriptor htd, boolean isInMemstore) throws IOException { append(info, tableName, edits, HConstants.DEFAULT_CLUSTER_ID, now, htd, true, isInMemstore); } @@ -866,7 +867,7 @@ class FSHLog implements HLog, Syncable { * @throws IOException */ @SuppressWarnings("deprecation") - private long append(HRegionInfo info, byte [] tableName, WALEdit edits, UUID clusterId, + private long append(HRegionInfo info, TableName tableName, WALEdit edits, UUID clusterId, final long now, HTableDescriptor htd, boolean doSync, boolean isInMemstore) throws IOException { if (edits.isEmpty()) return this.unflushedEntries.get(); @@ -905,7 +906,7 @@ class FSHLog implements HLog, Syncable { } @Override - public long appendNoSync(HRegionInfo info, byte [] tableName, WALEdit edits, + public long appendNoSync(HRegionInfo info, TableName tableName, WALEdit edits, UUID clusterId, final long now, HTableDescriptor htd) throws IOException { return append(info, tableName, edits, clusterId, now, htd, false, true); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java index 548364b..7e37ccd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java @@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALTrailer; @@ -263,12 +264,11 @@ public interface HLog { void closeAndDelete() throws IOException; /** - * Same as {@link #appendNoSync(HRegionInfo, byte[], WALEdit, UUID, long, HTableDescriptor)}, + * Same as {@link #appendNoSync(HRegionInfo, TableName, WALEdit, UUID, long, HTableDescriptor)}, * except it causes a sync on the log */ - void append( - HRegionInfo info, byte[] tableName, WALEdit edits, final long now, HTableDescriptor htd - ) throws IOException; + public void append(HRegionInfo info, TableName tableName, WALEdit edits, + final long now, HTableDescriptor htd) throws IOException; /** * Append a set of edits to the log. 
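// --------------------------------------------------------------------------
// Sketch, not part of the interface: callers now hand the WAL a TableName
// (typically HTableDescriptor.getTableName()), and only its byte form,
// TableName.getName(), ends up in the serialized HLogKey, as the HLogKey
// write/toProtobuf changes later in this patch show. The variables log,
// regionInfo, edits and htd below are hypothetical in-scope values.
TableName table = htd.getTableName();
long txid = log.appendNoSync(regionInfo, table, edits,
    HConstants.DEFAULT_CLUSTER_ID, EnvironmentEdgeManager.currentTimeMillis(), htd);
byte[] wireForm = table.getName();   // what HLogKey actually persists
// --------------------------------------------------------------------------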
Log edits are keyed by (encoded) @@ -281,14 +281,8 @@ public interface HLog { * @param htd * @param isInMemstore Whether the record is in memstore. False for system records. */ - void append( - HRegionInfo info, - byte[] tableName, - WALEdit edits, - final long now, - HTableDescriptor htd, - boolean isInMemstore - ) throws IOException; + public void append(HRegionInfo info, TableName tableName, WALEdit edits, + final long now, HTableDescriptor htd, boolean isInMemstore) throws IOException; /** * Append a set of edits to the log. Log edits are keyed by (encoded) @@ -305,14 +299,8 @@ public interface HLog { * @return txid of this transaction * @throws IOException */ - long appendNoSync( - HRegionInfo info, - byte[] tableName, - WALEdit edits, - UUID clusterId, - final long now, - HTableDescriptor htd - ) throws IOException; + public long appendNoSync(HRegionInfo info, TableName tableName, WALEdit edits, + UUID clusterId, final long now, HTableDescriptor htd) throws IOException; void hsync() throws IOException; @@ -333,7 +321,7 @@ public interface HLog { * to flush memstore. * * We stash the oldest seqNum for the region, and let the the next edit inserted in this - * region be recorded in {@link #append(HRegionInfo, byte[], WALEdit, long, HTableDescriptor)} + * region be recorded in {@link #append(HRegionInfo, TableName, WALEdit, long, HTableDescriptor)} * as new oldest seqnum. In case of flush being aborted, we put the stashed value back; * in case of flush succeeding, the seqNum of that first edit after start becomes the * valid oldest seqNum for this region. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java index 0028abe..481ecc6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java @@ -22,7 +22,6 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.EOFException; import java.io.IOException; -import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; import java.util.NavigableMap; @@ -32,6 +31,7 @@ import java.util.UUID; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FamilyScope; @@ -97,7 +97,7 @@ public class HLogKey implements WritableComparable { // The encoded region name. private byte [] encodedRegionName; - private byte [] tablename; + private TableName tablename; private long logSeqNum; // Time at which this edit was written. private long writeTime; @@ -125,7 +125,7 @@ public class HLogKey implements WritableComparable { * @param now Time at which this edit was written. 
* @param clusterId of the cluster (used in Replication) */ - public HLogKey(final byte [] encodedRegionName, final byte [] tablename, + public HLogKey(final byte [] encodedRegionName, final TableName tablename, long logSeqNum, final long now, UUID clusterId) { this.logSeqNum = logSeqNum; this.writeTime = now; @@ -155,7 +155,7 @@ public class HLogKey implements WritableComparable { } /** @return table name */ - public byte [] getTablename() { + public TableName getTablename() { return tablename; } @@ -197,7 +197,7 @@ public class HLogKey implements WritableComparable { @Override public String toString() { - return Bytes.toString(tablename) + "/" + Bytes.toString(encodedRegionName) + "/" + + return tablename + "/" + Bytes.toString(encodedRegionName) + "/" + logSeqNum; } @@ -210,7 +210,7 @@ public class HLogKey implements WritableComparable { */ public Map toStringMap() { Map stringMap = new HashMap(); - stringMap.put("table", Bytes.toStringBinary(tablename)); + stringMap.put("table", tablename); stringMap.put("region", Bytes.toStringBinary(encodedRegionName)); stringMap.put("sequence", logSeqNum); return stringMap; @@ -262,10 +262,10 @@ public class HLogKey implements WritableComparable { * meant to be a general purpose setter - it's only used * to collapse references to conserve memory. */ - void internTableName(byte []tablename) { + void internTableName(TableName tablename) { // We should not use this as a setter - only to swap // in a new reference to the same table name. - assert Bytes.equals(tablename, this.tablename); + assert tablename.equals(this.tablename); this.tablename = tablename; } @@ -289,12 +289,12 @@ public class HLogKey implements WritableComparable { WritableUtils.writeVInt(out, VERSION.code); if (compressionContext == null) { Bytes.writeByteArray(out, this.encodedRegionName); - Bytes.writeByteArray(out, this.tablename); + Bytes.writeByteArray(out, this.tablename.getName()); } else { Compressor.writeCompressed(this.encodedRegionName, 0, this.encodedRegionName.length, out, compressionContext.regionDict); - Compressor.writeCompressed(this.tablename, 0, this.tablename.length, out, + Compressor.writeCompressed(this.tablename.getName(), 0, this.tablename.getName().length, out, compressionContext.tableDict); } out.writeLong(this.logSeqNum); @@ -334,10 +334,12 @@ public class HLogKey implements WritableComparable { if (compressionContext == null || !version.atLeast(Version.COMPRESSED)) { this.encodedRegionName = new byte[len]; in.readFully(this.encodedRegionName); - this.tablename = Bytes.readByteArray(in); + byte[] tablenameBytes = Bytes.readByteArray(in); + this.tablename = TableName.valueOf(tablenameBytes); } else { this.encodedRegionName = Compressor.readCompressed(in, compressionContext.regionDict); - this.tablename = Compressor.readCompressed(in, compressionContext.tableDict); + byte[] tablenameBytes = Compressor.readCompressed(in, compressionContext.tableDict); + this.tablename = TableName.valueOf(tablenameBytes); } this.logSeqNum = in.readLong(); @@ -362,11 +364,12 @@ public class HLogKey implements WritableComparable { WALKey.Builder builder = WALKey.newBuilder(); if (compressionContext == null) { builder.setEncodedRegionName(ByteString.copyFrom(this.encodedRegionName)); - builder.setTableName(ByteString.copyFrom(this.tablename)); + builder.setTableName(ByteString.copyFrom(this.tablename.getName())); } else { builder.setEncodedRegionName( compressor.compress(this.encodedRegionName, compressionContext.regionDict)); - 
builder.setTableName(compressor.compress(this.tablename, compressionContext.tableDict)); + builder.setTableName(compressor.compress(this.tablename.getName(), + compressionContext.tableDict)); } builder.setLogSequenceNumber(this.logSeqNum); builder.setWriteTime(writeTime); @@ -391,11 +394,12 @@ public class HLogKey implements WritableComparable { if (this.compressionContext != null) { this.encodedRegionName = uncompressor.uncompress( walKey.getEncodedRegionName(), compressionContext.regionDict); - this.tablename = uncompressor.uncompress( + byte[] tablenameBytes = uncompressor.uncompress( walKey.getTableName(), compressionContext.tableDict); + this.tablename = TableName.valueOf(tablenameBytes); } else { this.encodedRegionName = walKey.getEncodedRegionName().toByteArray(); - this.tablename = walKey.getTableName().toByteArray(); + this.tablename = TableName.valueOf(walKey.getTableName().toByteArray()); } this.clusterId = HConstants.DEFAULT_CLUSTER_ID; if (walKey.hasClusterId()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java index 994a0d8..a3a292a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java @@ -22,6 +22,8 @@ import java.io.EOFException; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InterruptedIOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; import java.text.ParseException; import java.util.ArrayList; import java.util.Collections; @@ -35,6 +37,7 @@ import java.util.TreeSet; import java.util.concurrent.Callable; import java.util.concurrent.CompletionService; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.Future; @@ -51,10 +54,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.ServerName; @@ -114,7 +117,8 @@ public class HLogSplitter { OutputSink outputSink; EntryBuffers entryBuffers; - private Set disablingOrDisabledTables = new HashSet(); + private Set disablingOrDisabledTables = + new HashSet(); private ZooKeeperWatcher watcher; // If an exception is thrown by one of the other threads, it will be @@ -449,7 +453,7 @@ public class HLogSplitter { static Path getRegionSplitEditsPath(final FileSystem fs, final Entry logEntry, final Path rootDir, boolean isCreate) throws IOException { - Path tableDir = HTableDescriptor.getTableDir(rootDir, logEntry.getKey().getTablename()); + Path tableDir = FSUtils.getTableDir(rootDir, logEntry.getKey().getTablename()); String encodedRegionName = Bytes.toString(logEntry.getKey().getEncodedRegionName()); Path regiondir = HRegion.getRegionDir(tableDir, encodedRegionName); Path dir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir); @@ -747,11 +751,11 @@ public 
class HLogSplitter { static class RegionEntryBuffer implements HeapSize { long heapInBuffer = 0; List entryBuffer; - byte[] tableName; + TableName tableName; byte[] encodedRegionName; - RegionEntryBuffer(byte[] table, byte[] region) { - this.tableName = table; + RegionEntryBuffer(TableName tableName, byte[] region) { + this.tableName = tableName; this.encodedRegionName = region; this.entryBuffer = new LinkedList(); } @@ -957,7 +961,7 @@ public class HLogSplitter { abstract int getNumberOfRecoveredRegions(); /** - * @param entry A WAL Edit Entry + * @param buffer A WAL Edit Entry * @throws IOException */ abstract void append(RegionEntryBuffer buffer) throws IOException; @@ -1304,8 +1308,8 @@ public class HLogSplitter { private final Map onlineRegions = new ConcurrentHashMap(); - private Map tableNameToHConnectionMap = Collections - .synchronizedMap(new TreeMap(Bytes.BYTES_COMPARATOR)); + private Map tableNameToHConnectionMap = Collections + .synchronizedMap(new TreeMap()); /** * Map key -> value layout * : -> Queue @@ -1337,7 +1341,7 @@ public class HLogSplitter { } // check if current region in a disabling or disabled table - if (disablingOrDisabledTables.contains(Bytes.toString(buffer.tableName))) { + if (disablingOrDisabledTables.contains(buffer.tableName)) { // need fall back to old way logRecoveredEditsOutputSink.append(buffer); hasEditsInDisablingOrDisabledTables = true; @@ -1387,11 +1391,11 @@ public class HLogSplitter { * @throws IOException */ private void groupEditsByServer(List entries) throws IOException { - Set nonExistentTables = null; + Set nonExistentTables = null; Long cachedLastFlushedSequenceId = -1l; for (HLog.Entry entry : entries) { WALEdit edit = entry.getEdit(); - byte[] table = entry.getKey().getTablename(); + TableName table = entry.getKey().getTablename(); String encodeRegionNameStr = Bytes.toString(entry.getKey().getEncodedRegionName()); // skip edits of non-existent tables if (nonExistentTables != null && nonExistentTables.contains(table)) { @@ -1439,11 +1443,11 @@ public class HLogSplitter { encodeRegionNameStr); } catch (TableNotFoundException ex) { // table has been deleted so skip edits of the table - LOG.info("Table " + Bytes.toString(table) + LOG.info("Table " + table + " doesn't exist. 
Skip log replay for region " + encodeRegionNameStr); lastFlushedSequenceIds.put(encodeRegionNameStr, Long.MAX_VALUE); if (nonExistentTables == null) { - nonExistentTables = new TreeSet(Bytes.BYTES_COMPARATOR); + nonExistentTables = new TreeSet(); } nonExistentTables.add(table); this.skippedEdits.incrementAndGet(); @@ -1482,7 +1486,7 @@ public class HLogSplitter { put.setClusterId(entry.getKey().getClusterId()); preRow = put; } - preKey = loc.getHostnamePort() + KEY_DELIMITER + Bytes.toString(table); + preKey = loc.getHostnamePort() + KEY_DELIMITER + table; preLoc = loc; } if (kv.isDelete()) { @@ -1518,8 +1522,7 @@ public class HLogSplitter { * @throws IOException */ private HRegionLocation locateRegionAndRefreshLastFlushedSequenceId(HConnection hconn, - byte[] table, byte[] row, String originalEncodedRegionName) throws IOException { - + TableName table, byte[] row, String originalEncodedRegionName) throws IOException { // fetch location from cache HRegionLocation loc = onlineRegions.get(originalEncodedRegionName); if(loc != null) return loc; @@ -1527,7 +1530,7 @@ public class HLogSplitter { loc = hconn.getRegionLocation(table, row, true); if (loc == null) { throw new IOException("Can't locate location for row:" + Bytes.toString(row) - + " of table:" + Bytes.toString(table)); + + " of table:" + table); } // check if current row moves to a different region due to region merge/split if (!originalEncodedRegionName.equalsIgnoreCase(loc.getRegionInfo().getEncodedName())) { @@ -1615,7 +1618,7 @@ public class HLogSplitter { final long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE); boolean reloadLocation = false; - byte[] tableName = loc.getRegionInfo().getTableName(); + TableName tableName = loc.getRegionInfo().getTableName(); int tries = 0; Throwable cause = null; while (endTime > EnvironmentEdgeManager.currentTimeMillis()) { @@ -1741,7 +1744,7 @@ public class HLogSplitter { // close connections synchronized (this.tableNameToHConnectionMap) { - for (byte[] tableName : this.tableNameToHConnectionMap.keySet()) { + for (TableName tableName : this.tableNameToHConnectionMap.keySet()) { HConnection hconn = this.tableNameToHConnectionMap.get(tableName); try { hconn.clearRegionCache(); @@ -1783,23 +1786,23 @@ public class HLogSplitter { return ret; } - String tableName = getTableFromLocationStr(loc); - if(tableName.isEmpty()){ + TableName tableName = getTableFromLocationStr(loc); + if(tableName != null){ LOG.warn("Invalid location string:" + loc + " found."); } - HConnection hconn = getConnectionByTableName(Bytes.toBytes(tableName)); + HConnection hconn = getConnectionByTableName(tableName); synchronized (writers) { ret = writers.get(loc); if (ret == null) { - ret = new RegionServerWriter(conf, Bytes.toBytes(tableName), hconn); + ret = new RegionServerWriter(conf, tableName, hconn); writers.put(loc, ret); } } return ret; } - private HConnection getConnectionByTableName(final byte[] tableName) throws IOException { + private HConnection getConnectionByTableName(final TableName tableName) throws IOException { HConnection hconn = this.tableNameToHConnectionMap.get(tableName); if (hconn == null) { synchronized (this.tableNameToHConnectionMap) { @@ -1812,16 +1815,15 @@ public class HLogSplitter { } return hconn; } - - private String getTableFromLocationStr(String loc) { + private TableName getTableFromLocationStr(String loc) { /** * location key is in format #
*/ String[] splits = loc.split(KEY_DELIMITER); if (splits.length != 2) { - return ""; + return null; } - return splits[1]; + return TableName.valueOf(splits[1]); } } @@ -1832,7 +1834,7 @@ public class HLogSplitter { private final static class RegionServerWriter extends SinkWriter { final WALEditsReplaySink sink; - RegionServerWriter(final Configuration conf, final byte[] tableName, final HConnection conn) + RegionServerWriter(final Configuration conf, final TableName tableName, final HConnection conn) throws IOException { this.sink = new WALEditsReplaySink(conf, tableName, conn); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java index 95fbda9..2bb9d43 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; @@ -259,7 +260,7 @@ public class HLogUtil { public static void writeCompactionMarker(HLog log, HTableDescriptor htd, HRegionInfo info, final CompactionDescriptor c) throws IOException { WALEdit e = WALEdit.createCompaction(c); - log.append(info, c.getTableName().toByteArray(), e, + log.append(info, TableName.valueOf(c.getTableName().toByteArray()), e, EnvironmentEdgeManager.currentTimeMillis(), htd, false); if (LOG.isTraceEnabled()) { LOG.trace("Appended compaction marker " + TextFormat.shortDebugString(c)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java index 096a75e..ff1ae85 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java @@ -29,6 +29,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -65,7 +66,7 @@ public class WALEditsReplaySink { private final Configuration conf; private final HConnection conn; - private final byte[] tableName; + private final TableName tableName; private final MetricsWALEditsReplay metrics; private final AtomicLong totalReplayedEdits = new AtomicLong(); private final boolean skipErrors; @@ -78,7 +79,7 @@ public class WALEditsReplaySink { * @param conn * @throws IOException */ - public WALEditsReplaySink(Configuration conf, byte[] tableName, HConnection conn) + public WALEditsReplaySink(Configuration conf, TableName tableName, HConnection conn) throws IOException { this.conf = conf; this.metrics = new MetricsWALEditsReplay(); @@ -185,7 +186,7 @@ public class WALEditsReplaySink { private HRegionInfo regionInfo; private List> actions; - ReplayServerCallable(final HConnection connection, final byte [] tableName, + ReplayServerCallable(final 
HConnection connection, final TableName tableName, final HRegionLocation regionLoc, final HRegionInfo regionInfo, final List> actions) { super(connection, tableName, null); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java index 6f74ec5..b276277 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java @@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValueUtil; @@ -51,7 +52,6 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; /** @@ -127,9 +127,11 @@ public class ReplicationSink { long totalReplicated = 0; // Map of table => list of Rows, we only want to flushCommits once per // invocation of this method per table. - Map> rows = new TreeMap>(Bytes.BYTES_COMPARATOR); + Map> rows = + new TreeMap>(); for (WALEntry entry : entries) { - byte[] table = entry.getKey().getTableName().toByteArray(); + TableName table = + TableName.valueOf(entry.getKey().getTableName().toByteArray()); Cell previousCell = null; Mutation m = null; java.util.UUID uuid = toUUID(entry.getKey().getClusterId()); @@ -157,7 +159,7 @@ public class ReplicationSink { } totalReplicated++; } - for (Entry> entry : rows.entrySet()) { + for (Entry> entry : rows.entrySet()) { batch(entry.getKey(), entry.getValue()); } int size = entries.size(); @@ -229,7 +231,7 @@ public class ReplicationSink { * @param rows list of actions * @throws IOException */ - private void batch(byte[] tableName, List rows) throws IOException { + private void batch(TableName tableName, List rows) throws IOException { if (rows.isEmpty()) { return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 7973a40..8976170 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; @@ -385,8 +386,9 @@ public class ReplicationSource extends Thread removeNonReplicableEdits(entry); // Don't replicate catalog entries, if the WALEdit wasn't // containing anything to replicate and if we're currently not set to replicate - if (!(Bytes.equals(logKey.getTablename(), HConstants.ROOT_TABLE_NAME) || Bytes.equals( - 
logKey.getTablename(), HConstants.META_TABLE_NAME)) && edit.size() != 0) { + if (!(logKey.getTablename().equals(TableName.ROOT_TABLE_NAME) || + logKey.getTablename().equals(TableName.META_TABLE_NAME)) && + edit.size() != 0) { // Only set the clusterId if is a local key. // This ensures that the originator sets the cluster id // and all replicas retain the initial cluster id. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java index 846448d..8549982 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java @@ -34,13 +34,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.rest.model.TableInfoModel; import org.apache.hadoop.hbase.rest.model.TableRegionModel; -import org.apache.hadoop.hbase.util.Bytes; @InterfaceAudience.Private public class RegionsResource extends ResourceBase { @@ -74,15 +74,15 @@ public class RegionsResource extends ResourceBase { } servlet.getMetrics().incrementRequests(1); try { - String tableName = tableResource.getName(); - TableInfoModel model = new TableInfoModel(tableName); + TableName tableName = TableName.valueOf(tableResource.getName()); + TableInfoModel model = new TableInfoModel(tableName.getNameAsString()); Map regions = MetaScanner.allTableRegions( - servlet.getConfiguration(), null, Bytes.toBytes(tableName), false); + servlet.getConfiguration(), null, tableName, false); for (Map.Entry e: regions.entrySet()) { HRegionInfo hri = e.getKey(); ServerName addr = e.getValue(); model.add( - new TableRegionModel(tableName, hri.getRegionId(), + new TableRegionModel(tableName.getNameAsString(), hri.getRegionId(), hri.getStartKey(), hri.getEndKey(), addr.getHostAndPort())); } ResponseBuilder response = Response.ok(model); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java index b2e6cf4..7747dd0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java @@ -63,7 +63,7 @@ public class RootResource extends ResourceBase { TableListModel tableList = new TableListModel(); HTableDescriptor[] list = servlet.getAdmin().listTables(); for (HTableDescriptor htd: list) { - tableList.add(new TableModel(htd.getNameAsString())); + tableList.add(new TableModel(htd.getTableName().getNameAsString())); } return tableList; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java index 09bc4ce..fe2b6ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java @@ -42,6 +42,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; import 
org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -120,7 +121,7 @@ public class SchemaResource extends ResourceBase { .build(); } try { - HTableDescriptor htd = new HTableDescriptor(name); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); for (Map.Entry e: model.getAny().entrySet()) { htd.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java index f40551b..c48ab98 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java @@ -279,7 +279,7 @@ public class RemoteAdmin { path.append(accessToken); path.append('/'); } - path.append(Bytes.toStringBinary(desc.getName())); + path.append(desc.getTableName()); path.append('/'); path.append("schema"); int code = 0; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java index 4c6a9a2..e1e782b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java @@ -33,6 +33,7 @@ import com.google.protobuf.ServiceException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; @@ -233,6 +234,11 @@ public class RemoteHTable implements HTableInterface { return name.clone(); } + @Override + public TableName getName() { + return TableName.valueOf(name); + } + public Configuration getConfiguration() { return conf; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java index 9a6adff..02ba035 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java @@ -25,6 +25,7 @@ import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.util.Bytes; @@ -95,9 +96,10 @@ public class TableRegionModel implements Serializable { @XmlAttribute public String getName() { byte [] tableNameAsBytes = Bytes.toBytes(this.table); - byte [] nameAsBytes = HRegionInfo.createRegionName(tableNameAsBytes, + byte [] nameAsBytes = HRegionInfo.createRegionName( + TableName.valueOf(tableNameAsBytes), this.startKey, this.id, - !HTableDescriptor.isMetaTable(tableNameAsBytes)); + !HTableDescriptor.isSystemTable(TableName.valueOf(tableNameAsBytes))); return Bytes.toString(nameAsBytes); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java index 0f5e31d..592e31a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java @@ -37,6 +37,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.rest.protobuf.generated.ColumnSchemaMessage.ColumnSchema; @@ -83,7 +84,7 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { * @param htd the table descriptor */ public TableSchemaModel(HTableDescriptor htd) { - setName(htd.getNameAsString()); + setName(htd.getTableName().getNameAsString()); for (Map.Entry e: htd.getValues().entrySet()) { addAttribute(Bytes.toString(e.getKey().get()), @@ -337,7 +338,7 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { * @return a table descriptor */ public HTableDescriptor getTableDescriptor() { - HTableDescriptor htd = new HTableDescriptor(getName()); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(getName())); for (Map.Entry e: getAny().entrySet()) { htd.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java index 8d01eff..077ae82 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.security.access; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.filter.FilterBase; @@ -41,7 +42,7 @@ import org.apache.hadoop.hbase.security.User; class AccessControlFilter extends FilterBase { private TableAuthManager authManager; - private byte[] table; + private TableName table; private User user; /** @@ -51,7 +52,7 @@ class AccessControlFilter extends FilterBase { } AccessControlFilter(TableAuthManager mgr, User ugi, - byte[] tableName) { + TableName tableName) { authManager = mgr; table = tableName; user = ugi; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java index 7f7138f..5f670b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java @@ -32,6 +32,7 @@ import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -90,14 +91,15 @@ public class AccessControlLists { /** Internal 
storage table for access control lists */ public static final String ACL_TABLE_NAME_STR = "_acl_"; public static final byte[] ACL_TABLE_NAME = Bytes.toBytes(ACL_TABLE_NAME_STR); + public static final TableName ACL_TABLE = + TableName.valueOf(ACL_TABLE_NAME); public static final byte[] ACL_GLOBAL_NAME = ACL_TABLE_NAME; /** Column family used to store ACL grants */ public static final String ACL_LIST_FAMILY_STR = "l"; public static final byte[] ACL_LIST_FAMILY = Bytes.toBytes(ACL_LIST_FAMILY_STR); /** Table descriptor for ACL internal table */ - public static final HTableDescriptor ACL_TABLEDESC = new HTableDescriptor( - ACL_TABLE_NAME); + public static final HTableDescriptor ACL_TABLEDESC = new HTableDescriptor(ACL_TABLE); static { ACL_TABLEDESC.addFamily( new HColumnDescriptor(ACL_LIST_FAMILY, @@ -123,7 +125,7 @@ public class AccessControlLists { * @param master reference to HMaster */ static void init(MasterServices master) throws IOException { - if (!MetaReader.tableExists(master.getCatalogTracker(), ACL_TABLE_NAME_STR)) { + if (!MetaReader.tableExists(master.getCatalogTracker(), ACL_TABLE)) { master.createTable(ACL_TABLEDESC, null); } } @@ -138,7 +140,7 @@ public class AccessControlLists { throws IOException { Permission.Action[] actions = userPerm.getActions(); - Put p = new Put(userPerm.isGlobal() ? ACL_GLOBAL_NAME : userPerm.getTable()); + Put p = new Put(userPerm.isGlobal() ? ACL_GLOBAL_NAME : userPerm.getTable().getName()); byte[] key = userPermissionKey(userPerm); if ((actions == null) || (actions.length == 0)) { @@ -154,7 +156,7 @@ public class AccessControlLists { p.add(ACL_LIST_FAMILY, key, value); if (LOG.isDebugEnabled()) { LOG.debug("Writing permission for table "+ - Bytes.toString(userPerm.getTable())+" "+ + userPerm.getTable()+" "+ Bytes.toString(key)+": "+Bytes.toStringBinary(value) ); } @@ -183,7 +185,7 @@ public class AccessControlLists { static void removeUserPermission(Configuration conf, UserPermission userPerm) throws IOException { - Delete d = new Delete(userPerm.isGlobal() ? ACL_GLOBAL_NAME : userPerm.getTable()); + Delete d = new Delete(userPerm.isGlobal() ? ACL_GLOBAL_NAME : userPerm.getTable().getName()); byte[] key = userPermissionKey(userPerm); if (LOG.isDebugEnabled()) { @@ -202,12 +204,12 @@ public class AccessControlLists { /** * Remove specified table from the _acl_ table. */ - static void removeTablePermissions(Configuration conf, byte[] tableName) + static void removeTablePermissions(Configuration conf, TableName tableName) throws IOException{ - Delete d = new Delete(tableName); + Delete d = new Delete(tableName.getName()); if (LOG.isDebugEnabled()) { - LOG.debug("Removing permissions of removed table "+ Bytes.toString(tableName)); + LOG.debug("Removing permissions of removed table "+ tableName); } HTable acls = null; @@ -222,12 +224,12 @@ public class AccessControlLists { /** * Remove specified table column from the _acl_ table. 
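 * Rows in _acl_ are keyed by the table name, so the delete below uses tableName.getName() as its row key.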
*/ - static void removeTablePermissions(Configuration conf, byte[] tableName, byte[] column) + static void removeTablePermissions(Configuration conf, TableName tableName, byte[] column) throws IOException{ if (LOG.isDebugEnabled()) { LOG.debug("Removing permissions of removed column " + Bytes.toString(column) + - " from table "+ Bytes.toString(tableName)); + " from table "+ tableName); } HTable acls = null; @@ -256,7 +258,7 @@ public class AccessControlLists { } if (qualifierSet.size() > 0) { - Delete d = new Delete(tableName); + Delete d = new Delete(tableName.getName()); for (byte[] qualifier : qualifierSet) { d.deleteColumns(ACL_LIST_FAMILY, qualifier); } @@ -293,14 +295,14 @@ public class AccessControlLists { * metadata table. */ static boolean isAclRegion(HRegion region) { - return Bytes.equals(ACL_TABLE_NAME, region.getTableDesc().getName()); + return ACL_TABLE.equals(region.getTableDesc().getTableName()); } /** * Returns {@code true} if the given table is {@code _acl_} metadata table. */ static boolean isAclTable(HTableDescriptor desc) { - return Bytes.equals(ACL_TABLE_NAME, desc.getName()); + return ACL_TABLE.equals(desc.getTableName()); } /** @@ -311,7 +313,7 @@ public class AccessControlLists { * @return a map of the permissions for this table. * @throws IOException */ - static Map> loadAll( + static Map> loadAll( HRegion aclRegion) throws IOException { @@ -319,8 +321,8 @@ public class AccessControlLists { throw new IOException("Can only load permissions from "+ACL_TABLE_NAME_STR); } - Map> allPerms = - new TreeMap>(Bytes.BYTES_COMPARATOR); + Map> allPerms = + new TreeMap>(); // do a full scan of _acl_ table @@ -336,10 +338,10 @@ public class AccessControlLists { boolean hasNext = iScanner.next(row); ListMultimap perms = ArrayListMultimap.create(); - byte[] table = null; + TableName table = null; for (KeyValue kv : row) { if (table == null) { - table = kv.getRow(); + table = TableName.valueOf(kv.getRow()); } Pair permissionsOfUserOnTable = parseTablePermissionRecord(table, kv); @@ -369,10 +371,10 @@ public class AccessControlLists { * Load all permissions from the region server holding {@code _acl_}, * primarily intended for testing purposes. */ - static Map> loadAll( + static Map> loadAll( Configuration conf) throws IOException { - Map> allPerms = - new TreeMap>(Bytes.BYTES_COMPARATOR); + Map> allPerms = + new TreeMap>(); // do a full scan of _acl_, filtering on only first table region rows @@ -385,9 +387,10 @@ public class AccessControlLists { acls = new HTable(conf, ACL_TABLE_NAME); scanner = acls.getScanner(scan); for (Result row : scanner) { + TableName tableName = TableName.valueOf(row.getRow()); ListMultimap resultPerms = - parseTablePermissions(row.getRow(), row); - allPerms.put(row.getRow(), resultPerms); + parseTablePermissions(tableName, row); + allPerms.put(tableName, resultPerms); } } finally { if (scanner != null) scanner.close(); @@ -407,22 +410,22 @@ public class AccessControlLists { *
*/ static ListMultimap getTablePermissions(Configuration conf, - byte[] tableName) throws IOException { - if (tableName == null) tableName = ACL_TABLE_NAME; + TableName tableName) throws IOException { + if (tableName == null) tableName = ACL_TABLE; // for normal user tables, we just read the table row from _acl_ ListMultimap perms = ArrayListMultimap.create(); HTable acls = null; try { - acls = new HTable(conf, ACL_TABLE_NAME); - Get get = new Get(tableName); + acls = new HTable(conf, ACL_TABLE); + Get get = new Get(tableName.getName()); get.addFamily(ACL_LIST_FAMILY); Result row = acls.get(get); if (!row.isEmpty()) { perms = parseTablePermissions(tableName, row); } else { LOG.info("No permissions found in " + ACL_TABLE_NAME_STR + " for table " - + Bytes.toString(tableName)); + + tableName); } } finally { if (acls != null) acls.close(); @@ -436,7 +439,7 @@ public class AccessControlLists { * user plus associated permissions. */ static List getUserPermissions( - Configuration conf, byte[] tableName) + Configuration conf, TableName tableName) throws IOException { ListMultimap allPerms = getTablePermissions( conf, tableName); @@ -453,7 +456,7 @@ public class AccessControlLists { } private static ListMultimap parseTablePermissions( - byte[] table, Result result) { + TableName table, Result result) { ListMultimap perms = ArrayListMultimap.create(); if (result != null && result.size() > 0) { for (KeyValue kv : result.raw()) { @@ -472,7 +475,7 @@ public class AccessControlLists { } private static Pair parseTablePermissionRecord( - byte[] table, KeyValue kv) { + TableName table, KeyValue kv) { // return X given a set of permissions encoded in the permissionRecord kv. byte[] family = kv.getFamily(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 76c7130..8e4afeb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -16,10 +16,8 @@ package org.apache.hadoop.hbase.security.access; import java.io.IOException; import java.net.InetAddress; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; @@ -33,11 +31,13 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; @@ -139,13 +139,13 @@ public class AccessController extends BaseRegionObserver void initialize(RegionCoprocessorEnvironment e) throws IOException { final HRegion region = e.getRegion(); - Map> tables = + Map> tables = AccessControlLists.loadAll(region); // For each table, write out the table's permissions to the respective // znode for that table. 
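// The znode for each table is named from TableName.getNameAsString() (see ZKPermissionWatcher.writeToZookeeper).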
- for (Map.Entry> t: + for (Map.Entry> t: tables.entrySet()) { - byte[] table = t.getKey(); + TableName table = t.getKey(); ListMultimap perms = t.getValue(); byte[] serialized = AccessControlLists.writePermissionsAsBytes(perms, e.getConfiguration()); this.authManager.getZKPermissionWatcher().writeToZookeeper(table, serialized); @@ -159,7 +159,8 @@ public class AccessController extends BaseRegionObserver */ void updateACL(RegionCoprocessorEnvironment e, final Map> familyMap) { - Set tableSet = new TreeSet(Bytes.BYTES_COMPARATOR); + Set tableSet = + new TreeSet(); for (Map.Entry> f : familyMap.entrySet()) { List cells = f.getValue(); for (Cell cell: cells) { @@ -167,21 +168,21 @@ public class AccessController extends BaseRegionObserver if (Bytes.equals(kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength(), AccessControlLists.ACL_LIST_FAMILY, 0, AccessControlLists.ACL_LIST_FAMILY.length)) { - tableSet.add(kv.getRow()); + tableSet.add(TableName.valueOf(kv.getRow())); } } } ZKPermissionWatcher zkw = this.authManager.getZKPermissionWatcher(); Configuration conf = regionEnv.getConfiguration(); - for (byte[] tableName: tableSet) { + for (TableName tableName: tableSet) { try { ListMultimap perms = AccessControlLists.getTablePermissions(conf, tableName); byte[] serialized = AccessControlLists.writePermissionsAsBytes(perms, conf); zkw.writeToZookeeper(tableName, serialized); } catch (IOException ex) { - LOG.error("Failed updating permissions mirror for '" + Bytes.toString(tableName) + "'", ex); + LOG.error("Failed updating permissions mirror for '" + tableName + "'", ex); } } } @@ -204,7 +205,7 @@ public class AccessController extends BaseRegionObserver RegionCoprocessorEnvironment e, Map> families) { HRegionInfo hri = e.getRegion().getRegionInfo(); - byte[] tableName = hri.getTableName(); + TableName tableName = hri.getTableName(); // 1. All users need read access to .META. table. // this is a very common operation, so deal with it quickly. @@ -227,7 +228,7 @@ public class AccessController extends BaseRegionObserver // and the user need to be allowed to write on both tables. if (permRequest == Permission.Action.WRITE && (hri.isMetaRegion() || - Bytes.equals(tableName, AccessControlLists.ACL_GLOBAL_NAME)) && + Bytes.equals(tableName.getName(), AccessControlLists.ACL_GLOBAL_NAME)) && (authManager.authorize(user, Permission.Action.CREATE) || authManager.authorize(user, Permission.Action.ADMIN))) { @@ -329,7 +330,7 @@ public class AccessController extends BaseRegionObserver * @throws IOException if obtaining the current user fails * @throws AccessDeniedException if user has no authorization */ - private void requirePermission(String request, byte[] tableName, byte[] family, byte[] qualifier, + private void requirePermission(String request, TableName tableName, byte[] family, byte[] qualifier, Action... permissions) throws IOException { User user = getActiveUser(); AuthResult result = null; @@ -379,7 +380,7 @@ public class AccessController extends BaseRegionObserver if (!result.isAllowed()) { throw new AccessDeniedException("Insufficient permissions (table=" + - env.getRegion().getTableDesc().getNameAsString()+ + env.getRegion().getTableDesc().getTableName()+ ((families != null && families.size() > 0) ? ", family: " + result.toFamilyString() : "") + ", action=" + perm.toString() + ")"); @@ -394,7 +395,7 @@ public class AccessController extends BaseRegionObserver * @param tableName Affected table name. * @param familiMap Affected column families. 
*/ - private void requireGlobalPermission(String request, Permission.Action perm, byte[] tableName, + private void requireGlobalPermission(String request, Permission.Action perm, TableName tableName, Map> familyMap) throws IOException { User user = getActiveUser(); if (authManager.authorize(user, perm)) { @@ -417,7 +418,7 @@ public class AccessController extends BaseRegionObserver Map> familyMap) throws IOException { HRegionInfo hri = env.getRegion().getRegionInfo(); - byte[] tableName = hri.getTableName(); + TableName tableName = hri.getTableName(); if (user == null) { return false; @@ -490,7 +491,7 @@ public class AccessController extends BaseRegionObserver for (byte[] family: families) { familyMap.put(family, null); } - requireGlobalPermission("createTable", Permission.Action.CREATE, desc.getName(), familyMap); + requireGlobalPermission("createTable", Permission.Action.CREATE, desc.getTableName(), familyMap); } @Override @@ -504,7 +505,7 @@ public class AccessController extends BaseRegionObserver String owner = desc.getOwnerString(); // default the table owner to current user, if not specified. if (owner == null) owner = getActiveUser().getShortName(); - UserPermission userperm = new UserPermission(Bytes.toBytes(owner), desc.getName(), null, + UserPermission userperm = new UserPermission(Bytes.toBytes(owner), desc.getTableName(), null, Action.values()); AccessControlLists.addUserPermission(c.getEnvironment().getConfiguration(), userperm); } @@ -515,121 +516,121 @@ public class AccessController extends BaseRegionObserver HTableDescriptor desc, HRegionInfo[] regions) throws IOException {} @Override - public void preDeleteTable(ObserverContext c, byte[] tableName) + public void preDeleteTable(ObserverContext c, TableName tableName) throws IOException { requirePermission("deleteTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override public void preDeleteTableHandler(ObserverContext c, - byte[] tableName) throws IOException {} + TableName tableName) throws IOException {} @Override public void postDeleteTable(ObserverContext c, - byte[] tableName) throws IOException { + TableName tableName) throws IOException { AccessControlLists.removeTablePermissions(c.getEnvironment().getConfiguration(), tableName); } @Override public void postDeleteTableHandler(ObserverContext c, - byte[] tableName) throws IOException {} + TableName tableName) throws IOException {} @Override - public void preModifyTable(ObserverContext c, byte[] tableName, + public void preModifyTable(ObserverContext c, TableName tableName, HTableDescriptor htd) throws IOException { requirePermission("modifyTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override public void preModifyTableHandler(ObserverContext c, - byte[] tableName, HTableDescriptor htd) throws IOException {} + TableName tableName, HTableDescriptor htd) throws IOException {} @Override public void postModifyTable(ObserverContext c, - byte[] tableName, HTableDescriptor htd) throws IOException { + TableName tableName, HTableDescriptor htd) throws IOException { String owner = htd.getOwnerString(); // default the table owner to current user, if not specified. 
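// The owner is then granted all actions on htd.getTableName() via AccessControlLists.addUserPermission below.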
if (owner == null) owner = getActiveUser().getShortName(); - UserPermission userperm = new UserPermission(Bytes.toBytes(owner), htd.getName(), null, + UserPermission userperm = new UserPermission(Bytes.toBytes(owner), htd.getTableName(), null, Action.values()); AccessControlLists.addUserPermission(c.getEnvironment().getConfiguration(), userperm); } @Override public void postModifyTableHandler(ObserverContext c, - byte[] tableName, HTableDescriptor htd) throws IOException {} + TableName tableName, HTableDescriptor htd) throws IOException {} @Override - public void preAddColumn(ObserverContext c, byte[] tableName, + public void preAddColumn(ObserverContext c, TableName tableName, HColumnDescriptor column) throws IOException { requirePermission("addColumn", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override public void preAddColumnHandler(ObserverContext c, - byte[] tableName, HColumnDescriptor column) throws IOException {} + TableName tableName, HColumnDescriptor column) throws IOException {} @Override public void postAddColumn(ObserverContext c, - byte[] tableName, HColumnDescriptor column) throws IOException {} + TableName tableName, HColumnDescriptor column) throws IOException {} @Override public void postAddColumnHandler(ObserverContext c, - byte[] tableName, HColumnDescriptor column) throws IOException {} + TableName tableName, HColumnDescriptor column) throws IOException {} @Override - public void preModifyColumn(ObserverContext c, byte[] tableName, + public void preModifyColumn(ObserverContext c, TableName tableName, HColumnDescriptor descriptor) throws IOException { requirePermission("modifyColumn", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override public void preModifyColumnHandler(ObserverContext c, - byte[] tableName, HColumnDescriptor descriptor) throws IOException {} + TableName tableName, HColumnDescriptor descriptor) throws IOException {} @Override public void postModifyColumn(ObserverContext c, - byte[] tableName, HColumnDescriptor descriptor) throws IOException {} + TableName tableName, HColumnDescriptor descriptor) throws IOException {} @Override public void postModifyColumnHandler(ObserverContext c, - byte[] tableName, HColumnDescriptor descriptor) throws IOException {} + TableName tableName, HColumnDescriptor descriptor) throws IOException {} @Override - public void preDeleteColumn(ObserverContext c, byte[] tableName, + public void preDeleteColumn(ObserverContext c, TableName tableName, byte[] col) throws IOException { requirePermission("deleteColumn", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override public void preDeleteColumnHandler(ObserverContext c, - byte[] tableName, byte[] col) throws IOException {} + TableName tableName, byte[] col) throws IOException {} @Override public void postDeleteColumn(ObserverContext c, - byte[] tableName, byte[] col) throws IOException { + TableName tableName, byte[] col) throws IOException { AccessControlLists.removeTablePermissions(c.getEnvironment().getConfiguration(), tableName, col); } @Override public void postDeleteColumnHandler(ObserverContext c, - byte[] tableName, byte[] col) throws IOException {} + TableName tableName, byte[] col) throws IOException {} @Override - public void preEnableTable(ObserverContext c, byte[] tableName) + public void preEnableTable(ObserverContext c, TableName tableName) throws IOException { requirePermission("enableTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @Override public void preEnableTableHandler(ObserverContext c, - byte[] tableName) 
throws IOException {} + TableName tableName) throws IOException {} @Override public void postEnableTable(ObserverContext c, - byte[] tableName) throws IOException {} + TableName tableName) throws IOException {} @Override public void postEnableTableHandler(ObserverContext c, - byte[] tableName) throws IOException {} + TableName tableName) throws IOException {} @Override - public void preDisableTable(ObserverContext c, byte[] tableName) + public void preDisableTable(ObserverContext c, TableName tableName) throws IOException { - if (Bytes.equals(tableName, AccessControlLists.ACL_GLOBAL_NAME)) { + if (Bytes.equals(tableName.getName(), AccessControlLists.ACL_GLOBAL_NAME)) { throw new AccessDeniedException("Not allowed to disable " + AccessControlLists.ACL_TABLE_NAME_STR + " table."); } @@ -638,13 +639,13 @@ public class AccessController extends BaseRegionObserver @Override public void preDisableTableHandler(ObserverContext c, - byte[] tableName) throws IOException {} + TableName tableName) throws IOException {} @Override public void postDisableTable(ObserverContext c, - byte[] tableName) throws IOException {} + TableName tableName) throws IOException {} @Override public void postDisableTableHandler(ObserverContext c, - byte[] tableName) throws IOException {} + TableName tableName) throws IOException {} @Override public void preMove(ObserverContext c, HRegionInfo region, @@ -776,6 +777,36 @@ public class AccessController extends BaseRegionObserver final SnapshotDescription snapshot) throws IOException { } + @Override + public void preCreateNamespace(ObserverContext ctx, + NamespaceDescriptor ns) throws IOException { + } + + @Override + public void postCreateNamespace(ObserverContext ctx, + NamespaceDescriptor ns) throws IOException { + } + + @Override + public void preDeleteNamespace(ObserverContext ctx, + String namespace) throws IOException { + } + + @Override + public void postDeleteNamespace(ObserverContext ctx, + String namespace) throws IOException { + } + + @Override + public void preModifyNamespace(ObserverContext ctx, + NamespaceDescriptor ns) throws IOException { + } + + @Override + public void postModifyNamespace(ObserverContext ctx, + NamespaceDescriptor ns) throws IOException { + } + /* ---- RegionObserver implementation ---- */ @Override @@ -869,7 +900,7 @@ public class AccessController extends BaseRegionObserver if (!authResult.isAllowed()) { if (hasFamilyQualifierPermission(requestUser, Permission.Action.READ, e, get.getFamilyMap())) { - byte[] table = getTableName(e); + TableName table = getTableName(e); AccessControlFilter filter = new AccessControlFilter(authManager, requestUser, table); @@ -882,11 +913,11 @@ public class AccessController extends BaseRegionObserver get.setFilter(filter); } logResult(AuthResult.allow("get", "Access allowed with filter", requestUser, - Permission.Action.READ, authResult.getTable(), get.getFamilyMap())); + Permission.Action.READ, authResult.getTableName(), get.getFamilyMap())); } else { logResult(authResult); throw new AccessDeniedException("Insufficient permissions (table=" + - e.getRegion().getTableDesc().getNameAsString() + ", action=READ)"); + e.getRegion().getTableDesc().getTableName() + ", action=READ)"); } } else { // log auth success @@ -1008,7 +1039,7 @@ public class AccessController extends BaseRegionObserver if (!authResult.isAllowed()) { if (hasFamilyQualifierPermission(user, Permission.Action.READ, e, scan.getFamilyMap())) { - byte[] table = getTableName(e); + TableName table = getTableName(e); AccessControlFilter filter = new 
AccessControlFilter(authManager, user, table); @@ -1021,13 +1052,13 @@ public class AccessController extends BaseRegionObserver scan.setFilter(filter); } logResult(AuthResult.allow("scannerOpen", "Access allowed with filter", user, - Permission.Action.READ, authResult.getTable(), scan.getFamilyMap())); + Permission.Action.READ, authResult.getTableName(), scan.getFamilyMap())); } else { // no table/family level perms and no qualifier level perms, reject logResult(authResult); throw new AccessDeniedException("Insufficient permissions for user '"+ (user != null ? user.getShortName() : "null")+"' "+ - "for scanner open on table " + Bytes.toString(getTableName(e))); + "for scanner open on table " + getTableName(e)); } } else { // log success @@ -1094,7 +1125,7 @@ public class AccessController extends BaseRegionObserver List> familyPaths) throws IOException { for(Pair el : familyPaths) { requirePermission("preBulkLoadHFile", - ctx.getEnvironment().getRegion().getTableDesc().getName(), + ctx.getEnvironment().getRegion().getTableDesc().getTableName(), el.getFirst(), null, Permission.Action.WRITE); @@ -1103,7 +1134,7 @@ public class AccessController extends BaseRegionObserver private AuthResult hasSomeAccess(RegionCoprocessorEnvironment e, String method, Action action) throws IOException { User requestUser = getActiveUser(); - byte[] tableName = e.getRegion().getTableDesc().getName(); + TableName tableName = e.getRegion().getTableDesc().getTableName(); AuthResult authResult = permissionGranted(method, requestUser, action, e, Collections.EMPTY_MAP); if (!authResult.isAllowed()) { @@ -1132,7 +1163,7 @@ public class AccessController extends BaseRegionObserver logResult(authResult); if (!authResult.isAllowed()) { throw new AccessDeniedException("Insufficient permissions (table=" + - e.getRegion().getTableDesc().getNameAsString() + ", action=WRITE)"); + e.getRegion().getTableDesc().getTableName() + ", action=WRITE)"); } } @@ -1148,7 +1179,7 @@ public class AccessController extends BaseRegionObserver logResult(authResult); if (!authResult.isAllowed()) { throw new AccessDeniedException("Insufficient permissions (table=" + - e.getRegion().getTableDesc().getNameAsString() + ", action=WRITE)"); + e.getRegion().getTableDesc().getTableName() + ", action=WRITE)"); } } @@ -1223,9 +1254,9 @@ public class AccessController extends BaseRegionObserver AccessControlProtos.UserPermissionsRequest request, RpcCallback done) { AccessControlProtos.UserPermissionsResponse response = null; - byte[] table = null; - if (request.hasTable()) { - table = request.getTable().toByteArray(); + TableName table = null; + if (request.hasTableName()) { + table = ProtobufUtil.toTableName(request.getTableName()); } try { // only allowed to be called on _acl_ region @@ -1256,16 +1287,16 @@ public class AccessController extends BaseRegionObserver } AccessControlProtos.CheckPermissionsResponse response = null; try { - byte[] tableName = regionEnv.getRegion().getTableDesc().getName(); + TableName tableName = regionEnv.getRegion().getTableDesc().getTableName(); for (Permission permission : permissions) { if (permission instanceof TablePermission) { TablePermission tperm = (TablePermission) permission; for (Permission.Action action : permission.getActions()) { - if (!Arrays.equals(tperm.getTable(), tableName)) { + if (!tperm.getTable().equals(tableName)) { throw new CoprocessorException(AccessController.class, String.format("This method " + "can only execute at the table specified in TablePermission. 
" + - "Table of the region:%s , requested table:%s", Bytes.toString(tableName), - Bytes.toString(tperm.getTable()))); + "Table of the region:%s , requested table:%s", tableName, + tperm.getTable())); } Map> familyMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); @@ -1300,9 +1331,9 @@ public class AccessController extends BaseRegionObserver return AccessControlProtos.AccessControlService.newReflectiveService(this); } - private byte[] getTableName(RegionCoprocessorEnvironment e) { + private TableName getTableName(RegionCoprocessorEnvironment e) { HRegion region = e.getRegion(); - byte[] tableName = null; + TableName tableName = null; if (region != null) { HRegionInfo regionInfo = region.getRegionInfo(); @@ -1339,10 +1370,9 @@ public class AccessController extends BaseRegionObserver } private boolean isSpecialTable(HRegionInfo regionInfo) { - byte[] tableName = regionInfo.getTableName(); - return Arrays.equals(tableName, AccessControlLists.ACL_TABLE_NAME) - || Arrays.equals(tableName, Bytes.toBytes("-ROOT-")) - || Arrays.equals(tableName, Bytes.toBytes(".META.")); + TableName tableName = regionInfo.getTableName(); + return tableName.equals(AccessControlLists.ACL_TABLE) + || tableName.equals(TableName.META_TABLE_NAME); } @Override @@ -1365,7 +1395,8 @@ public class AccessController extends BaseRegionObserver @Override public void preGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors) throws IOException { + List tableNamesList, + List descriptors) throws IOException { // If the list is empty, this is a request for all table descriptors and requires GLOBAL // ADMIN privs. if (tableNamesList == null || tableNamesList.isEmpty()) { @@ -1375,18 +1406,17 @@ public class AccessController extends BaseRegionObserver // request can be granted. 
else { MasterServices masterServices = ctx.getEnvironment().getMasterServices(); - for (String tableName: tableNamesList) { + for (TableName tableName: tableNamesList) { // Do not deny if the table does not exist - byte[] nameAsBytes = Bytes.toBytes(tableName); try { - masterServices.checkTableModifiable(nameAsBytes); + masterServices.checkTableModifiable(tableName); } catch (TableNotFoundException ex) { // Skip checks for a table that does not exist continue; } catch (TableNotDisabledException ex) { // We don't care about this } - requirePermission("getTableDescriptors", nameAsBytes, null, null, + requirePermission("getTableDescriptors", tableName, null, null, Permission.Action.ADMIN, Permission.Action.CREATE); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java index 5e65fd9..b2623f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java @@ -23,6 +23,7 @@ import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; @@ -35,7 +36,7 @@ import org.apache.hadoop.hbase.util.Bytes; @InterfaceStability.Evolving public class AuthResult { private final boolean allowed; - private final byte[] table; + private final TableName table; private final Permission.Action action; private final String request; private final String reason; @@ -47,7 +48,7 @@ public class AuthResult { private final Map> families; public AuthResult(boolean allowed, String request, String reason, User user, - Permission.Action action, byte[] table, byte[] family, byte[] qualifier) { + Permission.Action action, TableName table, byte[] family, byte[] qualifier) { this.allowed = allowed; this.request = request; this.reason = reason; @@ -60,7 +61,7 @@ public class AuthResult { } public AuthResult(boolean allowed, String request, String reason, User user, - Permission.Action action, byte[] table, + Permission.Action action, TableName table, Map> families) { this.allowed = allowed; this.request = request; @@ -85,7 +86,7 @@ public class AuthResult { return reason; } - public byte[] getTable() { + public TableName getTableName() { return table; } @@ -152,7 +153,7 @@ public class AuthResult { .append(user != null ? user.getName() : "UNKNOWN") .append(", "); sb.append("scope=") - .append(table == null ? "GLOBAL" : Bytes.toString(table)) + .append(table == null ? 
"GLOBAL" : table) .append(", "); sb.append("family=") .append(toFamilyString()) @@ -168,23 +169,23 @@ public class AuthResult { } public static AuthResult allow(String request, String reason, User user, - Permission.Action action, byte[] table, byte[] family, byte[] qualifier) { + Permission.Action action, TableName table, byte[] family, byte[] qualifier) { return new AuthResult(true, request, reason, user, action, table, family, qualifier); } public static AuthResult allow(String request, String reason, User user, - Permission.Action action, byte[] table, + Permission.Action action, TableName table, Map> families) { return new AuthResult(true, request, reason, user, action, table, families); } public static AuthResult deny(String request, String reason, User user, - Permission.Action action, byte[] table, byte[] family, byte[] qualifier) { + Permission.Action action, TableName table, byte[] family, byte[] qualifier) { return new AuthResult(false, request, reason, user, action, table, family, qualifier); } public static AuthResult deny(String request, String reason, User user, - Permission.Action action, byte[] table, + Permission.Action action, TableName table, Map> families) { return new AuthResult(false, request, reason, user, action, table, families); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java index 664f863..1327779 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.coprocessor.CoprocessorService; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; @@ -150,7 +151,8 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService try { getAccessController().prePrepareBulkLoad(env); String bulkToken = createStagingDir(baseStagingDir, - getActiveUser(), request.getTableName().toByteArray()).toString(); + getActiveUser(), + TableName.valueOf(request.getTableName().toByteArray())).toString(); done.run(PrepareBulkLoadResponse.newBuilder().setBulkToken(bulkToken).build()); } catch (IOException e) { ResponseConverter.setControllerException(controller, e); @@ -166,7 +168,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService getAccessController().preCleanupBulkLoad(env); fs.delete(createStagingDir(baseStagingDir, getActiveUser(), - env.getRegion().getTableDesc().getName(), + env.getRegion().getTableDesc().getTableName(), new Path(request.getBulkToken()).getName()), true); done.run(CleanupBulkLoadResponse.newBuilder().build()); @@ -260,15 +262,17 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService .getCoprocessorHost().findCoprocessor(AccessController.class.getName()); } - private Path createStagingDir(Path baseDir, User user, byte[] tableName) throws IOException { - String randomDir = user.getShortName()+"__"+Bytes.toString(tableName)+"__"+ + private Path createStagingDir(Path baseDir, + User user, + TableName tableName) throws IOException { + String randomDir = user.getShortName()+"__"+ 
tableName +"__"+ (new BigInteger(RANDOM_WIDTH, random).toString(RANDOM_RADIX)); return createStagingDir(baseDir, user, tableName, randomDir); } private Path createStagingDir(Path baseDir, User user, - byte[] tableName, + TableName tableName, String randomDir) throws IOException { Path p = new Path(baseDir, randomDir); fs.mkdirs(p, PERM_ALL_ACCESS); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java index 7b89180..e99f69a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java @@ -24,6 +24,7 @@ import com.google.common.collect.Lists; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.security.User; @@ -92,8 +93,8 @@ public class TableAuthManager { /** Cache of global permissions */ private volatile PermissionCache globalCache; - private ConcurrentSkipListMap> tableCache = - new ConcurrentSkipListMap>(Bytes.BYTES_COMPARATOR); + private ConcurrentSkipListMap> tableCache = + new ConcurrentSkipListMap>(); private Configuration conf; private ZKPermissionWatcher zkperms; @@ -146,7 +147,8 @@ public class TableAuthManager { return this.zkperms; } - public void refreshCacheFromWritable(byte[] table, byte[] data) throws IOException { + public void refreshCacheFromWritable(TableName table, + byte[] data) throws IOException { if (data != null && data.length > 0) { ListMultimap perms; try { @@ -156,7 +158,7 @@ public class TableAuthManager { } if (perms != null) { - if (Bytes.equals(table, AccessControlLists.ACL_GLOBAL_NAME)) { + if (Bytes.equals(table.getName(), AccessControlLists.ACL_GLOBAL_NAME)) { updateGlobalCache(perms); } else { updateTableCache(table, perms); @@ -199,7 +201,8 @@ public class TableAuthManager { * @param table * @param tablePerms */ - private void updateTableCache(byte[] table, ListMultimap tablePerms) { + private void updateTableCache(TableName table, + ListMultimap tablePerms) { PermissionCache newTablePerms = new PermissionCache(); for (Map.Entry entry : tablePerms.entries()) { @@ -213,7 +216,7 @@ public class TableAuthManager { tableCache.put(table, newTablePerms); } - private PermissionCache getTablePermissions(byte[] table) { + private PermissionCache getTablePermissions(TableName table) { if (!tableCache.containsKey(table)) { tableCache.putIfAbsent(table, new PermissionCache()); } @@ -267,13 +270,15 @@ public class TableAuthManager { return false; } - private boolean authorize(List perms, byte[] table, byte[] family, - Permission.Action action) { + private boolean authorize(List perms, + TableName table, byte[] family, + Permission.Action action) { return authorize(perms, table, family, null, action); } - private boolean authorize(List perms, byte[] table, byte[] family, - byte[] qualifier, Permission.Action action) { + private boolean authorize(List perms, + TableName table, byte[] family, + byte[] qualifier, Permission.Action action) { if (perms != null) { for (TablePermission p : perms) { if (p.implies(table, family, qualifier, action)) { @@ -281,12 +286,12 @@ public class TableAuthManager { } } } else if (LOG.isDebugEnabled()) { - 
LOG.debug("No permissions found for table="+Bytes.toStringBinary(table)); + LOG.debug("No permissions found for table="+table); } return false; } - public boolean authorize(User user, byte[] table, KeyValue kv, + public boolean authorize(User user, TableName table, KeyValue kv, Permission.Action action) { PermissionCache tablePerms = tableCache.get(table); if (tablePerms != null) { @@ -308,7 +313,7 @@ public class TableAuthManager { return false; } - private boolean authorize(List perms, byte[] table, KeyValue kv, + private boolean authorize(List perms, TableName table, KeyValue kv, Permission.Action action) { if (perms != null) { for (TablePermission p : perms) { @@ -318,7 +323,7 @@ public class TableAuthManager { } } else if (LOG.isDebugEnabled()) { LOG.debug("No permissions for authorize() check, table=" + - Bytes.toStringBinary(table)); + table); } return false; @@ -342,18 +347,18 @@ public class TableAuthManager { * @param action * @return true if known and authorized, false otherwise */ - public boolean authorizeUser(String username, byte[] table, byte[] family, + public boolean authorizeUser(String username, TableName table, byte[] family, Permission.Action action) { return authorizeUser(username, table, family, null, action); } - public boolean authorizeUser(String username, byte[] table, byte[] family, + public boolean authorizeUser(String username, TableName table, byte[] family, byte[] qualifier, Permission.Action action) { // global authorization supercedes table level if (authorizeUser(username, action)) { return true; } - if (table == null) table = AccessControlLists.ACL_TABLE_NAME; + if (table == null) table = AccessControlLists.ACL_TABLE; return authorize(getTablePermissions(table).getUser(username), table, family, qualifier, action); } @@ -376,17 +381,17 @@ public class TableAuthManager { * @param action * @return true if known and authorized, false otherwise */ - public boolean authorizeGroup(String groupName, byte[] table, byte[] family, + public boolean authorizeGroup(String groupName, TableName table, byte[] family, Permission.Action action) { // global authorization supercedes table level if (authorizeGroup(groupName, action)) { return true; } - if (table == null) table = AccessControlLists.ACL_TABLE_NAME; + if (table == null) table = AccessControlLists.ACL_TABLE; return authorize(getTablePermissions(table).getGroup(groupName), table, family, action); } - public boolean authorize(User user, byte[] table, byte[] family, + public boolean authorize(User user, TableName table, byte[] family, byte[] qualifier, Permission.Action action) { if (authorizeUser(user.getShortName(), table, family, qualifier, action)) { return true; @@ -403,7 +408,7 @@ public class TableAuthManager { return false; } - public boolean authorize(User user, byte[] table, byte[] family, + public boolean authorize(User user, TableName table, byte[] family, Permission.Action action) { return authorize(user, table, family, null, action); } @@ -415,7 +420,7 @@ public class TableAuthManager { * authorize() on the same column family would return true. 
*/ public boolean matchPermission(User user, - byte[] table, byte[] family, Permission.Action action) { + TableName table, byte[] family, Permission.Action action) { PermissionCache tablePerms = tableCache.get(table); if (tablePerms != null) { List userPerms = tablePerms.getUser(user.getShortName()); @@ -446,7 +451,7 @@ public class TableAuthManager { } public boolean matchPermission(User user, - byte[] table, byte[] family, byte[] qualifier, + TableName table, byte[] family, byte[] qualifier, Permission.Action action) { PermissionCache tablePerms = tableCache.get(table); if (tablePerms != null) { @@ -477,6 +482,10 @@ public class TableAuthManager { } public void remove(byte[] table) { + remove(TableName.valueOf(table)); + } + + public void remove(TableName table) { tableCache.remove(table); } @@ -487,7 +496,7 @@ public class TableAuthManager { * @param table * @param perms */ - public void setUserPermissions(String username, byte[] table, + public void setUserPermissions(String username, TableName table, List perms) { PermissionCache tablePerms = getTablePermissions(table); tablePerms.replaceUser(username, perms); @@ -501,14 +510,14 @@ public class TableAuthManager { * @param table * @param perms */ - public void setGroupPermissions(String group, byte[] table, + public void setGroupPermissions(String group, TableName table, List perms) { PermissionCache tablePerms = getTablePermissions(table); tablePerms.replaceGroup(group, perms); writeToZooKeeper(table, tablePerms); } - public void writeToZooKeeper(byte[] table, + public void writeToZooKeeper(TableName table, PermissionCache tablePerms) { byte[] serialized = new byte[0]; if (tablePerms != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java index 23437fa..be4538d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.security.access; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; @@ -92,10 +93,10 @@ public class ZKPermissionWatcher extends ZooKeeperListener { public void nodeDataChanged(String path) { if (aclZNode.equals(ZKUtil.getParent(path))) { // update cache on an existing table node - String table = ZKUtil.getNodeName(path); + TableName table = TableName.valueOf(ZKUtil.getNodeName(path)); try { byte[] data = ZKUtil.getDataAndWatch(watcher, path); - authManager.refreshCacheFromWritable(Bytes.toBytes(table), data); + authManager.refreshCacheFromWritable(table, data); } catch (KeeperException ke) { LOG.error("Error reading data from zookeeper for node "+table, ke); // only option is to abort @@ -125,14 +126,14 @@ public class ZKPermissionWatcher extends ZooKeeperListener { for (ZKUtil.NodeAndData n : nodes) { if (n.isEmpty()) continue; String path = n.getNode(); - String table = ZKUtil.getNodeName(path); + TableName table = TableName.valueOf(ZKUtil.getNodeName(path)); try { byte[] nodeData = n.getData(); if (LOG.isDebugEnabled()) { LOG.debug("Updating permissions cache from node "+table+" with data: "+ 
Bytes.toStringBinary(nodeData)); } - authManager.refreshCacheFromWritable(Bytes.toBytes(table), nodeData); + authManager.refreshCacheFromWritable(table, nodeData); } catch (IOException ioe) { LOG.error("Failed parsing permissions for table '" + table + "' from zk", ioe); @@ -145,16 +146,16 @@ public class ZKPermissionWatcher extends ZooKeeperListener { * @param tableName * @param permsData */ - public void writeToZookeeper(byte[] tableName, byte[] permsData) { + public void writeToZookeeper(TableName tableName, byte[] permsData) { String zkNode = ZKUtil.joinZNode(watcher.baseZNode, ACL_NODE); - zkNode = ZKUtil.joinZNode(zkNode, Bytes.toString(tableName)); + zkNode = ZKUtil.joinZNode(zkNode, tableName.getNameAsString()); try { ZKUtil.createWithParents(watcher, zkNode); ZKUtil.updateExistingNodeData(watcher, zkNode, permsData, -1); } catch (KeeperException e) { LOG.error("Failed updating permissions for table '" + - Bytes.toString(tableName) + "'", e); + tableName + "'", e); watcher.abort("Failed writing node "+zkNode+" to zookeeper", e); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java index e0a6d19..a2f6222 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java @@ -27,6 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -52,7 +53,7 @@ public class TokenUtil { Configuration conf) throws IOException { HTable meta = null; try { - meta = new HTable(conf, ".META."); + meta = new HTable(conf, TableName.META_TABLE_NAME); CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW); AuthenticationProtos.AuthenticationService.BlockingInterface service = AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index 9e5e3d4..e8c67fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -39,11 +39,13 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.HLogLink; import org.apache.hadoop.hbase.mapreduce.JobUtil; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -149,10 +151,12 @@ public final class ExportSnapshot extends Configured implements Tool { Path path; if (HFileLink.isHFileLink(inputPath) || StoreFileInfo.isReference(inputPath)) { String family 
= inputPath.getParent().getName(); - String table = HFileLink.getReferencedTableName(inputPath.getName()); + TableName table = + HFileLink.getReferencedTableName(inputPath.getName()); String region = HFileLink.getReferencedRegionName(inputPath.getName()); String hfile = HFileLink.getReferencedHFileName(inputPath.getName()); - path = new Path(table, new Path(region, new Path(family, hfile))); + path = new Path(FSUtils.getTableDir(new Path("./"), table), + new Path(region, new Path(family, hfile))); } else if (isHLogLinkPath(inputPath)) { String logName = inputPath.getName(); path = new Path(new Path(outputRoot, HConstants.HREGION_OLDLOGDIR_NAME), logName); @@ -372,7 +376,8 @@ public final class ExportSnapshot extends Configured implements Tool { SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir); final List> files = new ArrayList>(); - final String table = snapshotDesc.getTable(); + final TableName table = + TableName.valueOf(snapshotDesc.getTable()); final Configuration conf = getConf(); // Get snapshot files diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java index 41fdbc3..11cd5b5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java @@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; @@ -45,6 +46,7 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; @@ -107,25 +109,33 @@ public class RestoreSnapshotHelper { private final MonitoredTask status; private final SnapshotDescription snapshotDesc; + private final TableName snapshotTable; private final Path snapshotDir; private final HTableDescriptor tableDesc; + private final Path rootDir; private final Path tableDir; private final Configuration conf; private final FileSystem fs; - public RestoreSnapshotHelper(final Configuration conf, final FileSystem fs, - final SnapshotDescription snapshotDescription, final Path snapshotDir, - final HTableDescriptor tableDescriptor, final Path tableDir, - final ForeignExceptionDispatcher monitor, final MonitoredTask status) + public RestoreSnapshotHelper(final Configuration conf, + final FileSystem fs, + final SnapshotDescription snapshotDescription, + final Path snapshotDir, + final HTableDescriptor tableDescriptor, + final Path rootDir, + final ForeignExceptionDispatcher monitor, + final MonitoredTask status) { this.fs = fs; this.conf = conf; this.snapshotDesc = snapshotDescription; + this.snapshotTable = TableName.valueOf(snapshotDescription.getTable()); this.snapshotDir = snapshotDir; this.tableDesc = tableDescriptor; - this.tableDir = tableDir; + 
this.rootDir = rootDir; + this.tableDir = FSUtils.getTableDir(rootDir, tableDesc.getTableName()); this.monitor = monitor; this.status = status; } @@ -311,7 +321,7 @@ public class RestoreSnapshotHelper { Map> snapshotFiles = SnapshotReferenceUtil.getRegionHFileReferences(fs, snapshotRegionDir); Path regionDir = new Path(tableDir, regionInfo.getEncodedName()); - String tableName = tableDesc.getNameAsString(); + String tableName = tableDesc.getTableName().getNameAsString(); // Restore families present in the table for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) { @@ -412,7 +422,7 @@ public class RestoreSnapshotHelper { } // create the regions on disk - ModifyRegionUtils.createRegions(conf, tableDir.getParent(), + ModifyRegionUtils.createRegions(conf, rootDir, tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() { public void fillRegion(final HRegion region) throws IOException { cloneRegion(region, snapshotRegions.get(region.getRegionInfo().getEncodedName())); @@ -437,7 +447,7 @@ public class RestoreSnapshotHelper { throws IOException { final Path snapshotRegionDir = new Path(snapshotDir, snapshotRegionInfo.getEncodedName()); final Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName()); - final String tableName = tableDesc.getNameAsString(); + final String tableName = tableDesc.getTableName().getNameAsString(); SnapshotReferenceUtil.visitRegionStoreFiles(fs, snapshotRegionDir, new FSVisitor.StoreFileVisitor() { public void storeFile (final String region, final String family, final String hfile) @@ -493,9 +503,9 @@ public class RestoreSnapshotHelper { private void restoreReferenceFile(final Path familyDir, final HRegionInfo regionInfo, final String hfileName) throws IOException { // Extract the referred information (hfile name and parent region) - String tableName = snapshotDesc.getTable(); - Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path(new Path(tableName, - regionInfo.getEncodedName()), familyDir.getName()), hfileName)); + Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path(new Path( + snapshotTable.getNameAsString(), regionInfo.getEncodedName()), familyDir.getName()), + hfileName)); String snapshotRegionName = refPath.getParent().getParent().getName(); String fileName = refPath.getName(); @@ -506,13 +516,13 @@ public class RestoreSnapshotHelper { // The output file should be a reference link table=snapshotRegion-fileName.clonedRegionName String refLink = fileName; if (!HFileLink.isHFileLink(fileName)) { - refLink = HFileLink.createHFileLinkName(tableName, snapshotRegionName, fileName); + refLink = HFileLink.createHFileLinkName(snapshotTable, snapshotRegionName, fileName); } Path outPath = new Path(familyDir, refLink + '.' 
+ clonedRegionName); // Create the new reference Path linkPath = new Path(familyDir, - HFileLink.createHFileLinkName(tableName, regionInfo.getEncodedName(), hfileName)); + HFileLink.createHFileLinkName(snapshotTable, regionInfo.getEncodedName(), hfileName)); InputStream in = new HFileLink(conf, linkPath).open(fs); OutputStream out = fs.create(outPath); IOUtils.copyBytes(in, out, conf); @@ -527,7 +537,7 @@ public class RestoreSnapshotHelper { * @return the new HRegion instance */ public HRegionInfo cloneRegionInfo(final HRegionInfo snapshotRegionInfo) { - return new HRegionInfo(tableDesc.getName(), + return new HRegionInfo(tableDesc.getTableName(), snapshotRegionInfo.getStartKey(), snapshotRegionInfo.getEndKey(), snapshotRegionInfo.isSplit(), snapshotRegionInfo.getRegionId()); } @@ -543,7 +553,7 @@ public class RestoreSnapshotHelper { */ private void restoreWALs() throws IOException { final SnapshotLogSplitter logSplitter = new SnapshotLogSplitter(conf, fs, tableDir, - Bytes.toBytes(snapshotDesc.getTable()), regionsMap); + snapshotTable, regionsMap); try { // Recover.Edits SnapshotReferenceUtil.visitRecoveredEdits(fs, snapshotDir, @@ -578,7 +588,8 @@ public class RestoreSnapshotHelper { HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir.getPath()); regions.add(hri); } - LOG.debug("found " + regions.size() + " regions for table=" + tableDesc.getNameAsString()); + LOG.debug("found " + regions.size() + " regions for table=" + + tableDesc.getTableName().getNameAsString()); return regions; } @@ -591,7 +602,7 @@ public class RestoreSnapshotHelper { * @throws IOException */ public static HTableDescriptor cloneTableSchema(final HTableDescriptor snapshotTableDescriptor, - final byte[] tableName) throws IOException { + final TableName tableName) throws IOException { HTableDescriptor htd = new HTableDescriptor(tableName); for (HColumnDescriptor hcd: snapshotTableDescriptor.getColumnFamilies()) { htd.addFamily(hcd); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java index f2c7ceb..34d3224 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.snapshot; import java.io.IOException; import java.util.Collections; +import com.google.protobuf.ByteString; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -29,6 +30,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; @@ -270,11 +272,11 @@ public class SnapshotDescriptionUtils { } /** - * Read in the {@link SnapshotDescription} stored for the snapshot in the passed directory + * Read in the {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} stored for the snapshot in the passed directory * @param fs filesystem where the snapshot was taken * @param snapshotDir directory where the snapshot was stored * @return the stored 
snapshot description - * @throws org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException if the + * @throws CorruptedSnapshotException if the * snapshot cannot be read */ public static SnapshotDescription readSnapshotInfo(FileSystem fs, Path snapshotDir) @@ -284,7 +286,8 @@ public class SnapshotDescriptionUtils { FSDataInputStream in = null; try { in = fs.open(snapshotInfo); - return SnapshotDescription.parseFrom(in); + SnapshotDescription desc = SnapshotDescription.parseFrom(in); + return desc; } finally { if (in != null) in.close(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java index efcc184..580bded 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java @@ -21,8 +21,6 @@ package org.apache.hadoop.hbase.snapshot; import java.io.IOException; import java.io.FileNotFoundException; import java.text.SimpleDateFormat; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; import java.util.Date; import org.apache.commons.logging.Log; @@ -33,6 +31,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -43,9 +43,6 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.HLogLink; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; -import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; -import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.FSTableDescriptors; @@ -108,12 +105,14 @@ public final class SnapshotInfo extends Configured implements Tool { private long logSize = 0; private final SnapshotDescription snapshot; + private final TableName snapshotTable; private final Configuration conf; private final FileSystem fs; SnapshotStats(final Configuration conf, final FileSystem fs, final SnapshotDescription snapshot) { this.snapshot = snapshot; + this.snapshotTable = TableName.valueOf(snapshot.getTable()); this.conf = conf; this.fs = fs; } @@ -187,7 +186,7 @@ public final class SnapshotInfo extends Configured implements Tool { */ FileInfo addStoreFile(final String region, final String family, final String hfile) throws IOException { - String table = this.snapshot.getTable(); + TableName table = snapshotTable; Path path = new Path(family, HFileLink.createHFileLinkName(table, region, hfile)); HFileLink link = new HFileLink(conf, path); boolean inArchive = false; @@ -330,7 +329,7 @@ public final class SnapshotInfo extends Configured implements Tool { System.out.println("----------------------------------------"); System.out.println(" Name: " + snapshotDesc.getName()); System.out.println(" Type: " + snapshotDesc.getType()); - System.out.println(" Table: " + snapshotDesc.getTable()); + System.out.println(" Table: " + snapshotTableDesc.getTableName().getNameAsString()); System.out.println(" Format: " + 
snapshotDesc.getVersion()); System.out.println("Created: " + df.format(new Date(snapshotDesc.getCreationTime()))); System.out.println(); @@ -357,7 +356,7 @@ public final class SnapshotInfo extends Configured implements Tool { } // Collect information about hfiles and logs in the snapshot - final String table = this.snapshotDesc.getTable(); + final String table = snapshotTableDesc.getTableName().getNameAsString(); final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, this.snapshotDesc); SnapshotReferenceUtil.visitReferencedFiles(fs, snapshotDir, new SnapshotReferenceUtil.FileVisitor() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java index 8df1a7b..219e5aa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotLogSplitter.java @@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.HLogLink; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.wal.HLog; @@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; /** * If the snapshot has references to one or more log files, @@ -95,8 +97,8 @@ class SnapshotLogSplitter implements Closeable { private final Map regionsMap; private final Configuration conf; - private final byte[] snapshotTableName; - private final byte[] tableName; + private final TableName snapshotTableName; + private final TableName tableName; private final Path tableDir; private final FileSystem fs; @@ -105,11 +107,11 @@ class SnapshotLogSplitter implements Closeable { * @params regionsMap maps original region names to the new ones. 
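A minimal sketch of the conversion the snapshot code now performs, turning the table string stored in a SnapshotDescription back into a TableName; fs and snapshotDir are assumed to be an open FileSystem and the snapshot directory:

    // illustrative only
    SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
    TableName snapshotTable = TableName.valueOf(desc.getTable());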
*/ public SnapshotLogSplitter(final Configuration conf, final FileSystem fs, - final Path tableDir, final byte[] snapshotTableName, + final Path tableDir, final TableName snapshotTableName, final Map regionsMap) { this.regionsMap = regionsMap; this.snapshotTableName = snapshotTableName; - this.tableName = Bytes.toBytes(tableDir.getName()); + this.tableName = FSUtils.getTableName(tableDir); this.tableDir = tableDir; this.conf = conf; this.fs = fs; @@ -123,15 +125,15 @@ class SnapshotLogSplitter implements Closeable { public void splitLog(final String serverName, final String logfile) throws IOException { LOG.debug("Restore log=" + logfile + " server=" + serverName + - " for snapshotTable=" + Bytes.toString(snapshotTableName) + - " to table=" + Bytes.toString(tableName)); + " for snapshotTable=" + snapshotTableName + + " to table=" + tableName); splitLog(new HLogLink(conf, serverName, logfile).getAvailablePath(fs)); } public void splitRecoveredEdit(final Path editPath) throws IOException { LOG.debug("Restore recover.edits=" + editPath + - " for snapshotTable=" + Bytes.toString(snapshotTableName) + - " to table=" + Bytes.toString(tableName)); + " for snapshotTable=" + snapshotTableName + + " to table=" + tableName); splitLog(editPath); } @@ -154,7 +156,7 @@ class SnapshotLogSplitter implements Closeable { HLogKey key = entry.getKey(); // We're interested only in the snapshot table that we're restoring - if (!Bytes.equals(key.getTablename(), snapshotTableName)) continue; + if (!key.getTablename().equals(snapshotTableName)) continue; // Writer for region. if (!Bytes.equals(regionName, key.getEncodedRegionName())) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java index 16914d2..ec50b71 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/TableInfoCopyTask.java @@ -24,7 +24,9 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.util.FSTableDescriptors; @@ -60,9 +62,8 @@ public class TableInfoCopyTask extends SnapshotTask { LOG.debug("Attempting to copy table info for snapshot:" + ClientSnapshotDescriptionUtils.toString(this.snapshot)); // get the HTable descriptor - HTableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, - this.snapshot.getTable()); + TableName.valueOf(this.snapshot.getTable())); this.rethrowException(); // write a copy of descriptor to the snapshot directory Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java index 378a012..ee4d947 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import 
org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; @@ -593,7 +594,7 @@ public class ThriftServerRunner implements Runnable { HTableDescriptor[] tables = this.getHBaseAdmin().listTables(); ArrayList list = new ArrayList(tables.length); for (int i = 0; i < tables.length; i++) { - list.add(ByteBuffer.wrap(tables[i].getName())); + list.add(ByteBuffer.wrap(tables[i].getTableName().getName())); } return list; } catch (IOException e) { @@ -931,7 +932,7 @@ public class ThriftServerRunner implements Runnable { if (getHBaseAdmin().tableExists(tableName)) { throw new AlreadyExists("table name already in use"); } - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); for (ColumnDescriptor col : columnFamilies) { HColumnDescriptor colDesc = ThriftUtilities.colDescFromThrift(col); desc.addFamily(colDesc); @@ -1378,13 +1379,13 @@ public class ThriftServerRunner implements Runnable { @Override public TRegionInfo getRegionInfo(ByteBuffer searchRow) throws IOError { try { - HTable table = getTable(HConstants.META_TABLE_NAME); + HTable table = getTable(TableName.META_TABLE_NAME.getName()); byte[] row = getBytes(searchRow); Result startRowResult = table.getRowOrBefore( row, HConstants.CATALOG_FAMILY); if (startRowResult == null) { - throw new IOException("Cannot find row in .META., row=" + throw new IOException("Cannot find row in "+ TableName.META_TABLE_NAME+", row=" + Bytes.toStringBinary(row)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index 9b3c681..5239e1f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -23,6 +23,7 @@ import org.apache.commons.lang.time.StopWatch; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -163,7 +164,7 @@ public final class Canary implements Tool { if (tables_index >= 0) { for (int i = tables_index; i < args.length; i++) { - sniff(admin, sink, args[i]); + sniff(admin, sink, TableName.valueOf(args[i])); } } else { sniff(); @@ -202,7 +203,7 @@ public final class Canary implements Tool { * @param tableName * @throws Exception */ - public static void sniff(final HBaseAdmin admin, String tableName) + public static void sniff(final HBaseAdmin admin, TableName tableName) throws Exception { sniff(admin, new StdOutSink(), tableName); } @@ -214,10 +215,10 @@ public final class Canary implements Tool { * @param tableName * @throws Exception */ - private static void sniff(final HBaseAdmin admin, final Sink sink, String tableName) + private static void sniff(final HBaseAdmin admin, final Sink sink, TableName tableName) throws Exception { if (admin.isTableAvailable(tableName)) { - sniff(admin, sink, admin.getTableDescriptor(tableName.getBytes())); + sniff(admin, sink, admin.getTableDescriptor(tableName)); } else { LOG.warn(String.format("Table %s is not available", tableName)); } @@ -232,12 +233,12 @@ public final class Canary implements Tool { HTable table = null; try { - table = 
new HTable(admin.getConfiguration(), tableDesc.getName()); + table = new HTable(admin.getConfiguration(), tableDesc.getTableName()); } catch (TableNotFoundException e) { return; } - for (HRegionInfo region : admin.getTableRegions(tableDesc.getName())) { + for (HRegionInfo region : admin.getTableRegions(tableDesc.getTableName())) { try { sniffRegion(admin, sink, region, table); } catch (Exception e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptorMigrationToSubdir.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptorMigrationToSubdir.java index 6ff1c90..05fa1fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptorMigrationToSubdir.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptorMigrationToSubdir.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; /** @@ -53,8 +54,8 @@ public class FSTableDescriptorMigrationToSubdir { * migrated. */ private static boolean needsMigration(FileSystem fs, Path rootDir) throws IOException { - Path metaTableDir = FSTableDescriptors.getTableDirectory(rootDir, - Bytes.toString(HConstants.META_TABLE_NAME)); + Path metaTableDir = FSUtils.getTableDir(rootDir, + TableName.META_TABLE_NAME); FileStatus metaTableInfoStatus = FSTableDescriptors.getTableInfoPath(fs, metaTableDir); return metaTableInfoStatus == null; @@ -86,14 +87,13 @@ public class FSTableDescriptorMigrationToSubdir { } LOG.info("Migrating system tables"); - migrateTableIfExists(fs, rootDir, HConstants.ROOT_TABLE_NAME); // migrate meta last because that's what we check to see if migration is complete - migrateTableIfExists(fs, rootDir, HConstants.META_TABLE_NAME); + migrateTableIfExists(fs, rootDir, TableName.META_TABLE_NAME); } - private static void migrateTableIfExists(FileSystem fs, Path rootDir, byte[] tableName) + private static void migrateTableIfExists(FileSystem fs, Path rootDir, TableName tableName) throws IOException { - Path tableDir = FSTableDescriptors.getTableDirectory(rootDir, Bytes.toString(tableName)); + Path tableDir = FSUtils.getTableDir(rootDir, tableName); if (fs.exists(tableDir)) { migrateTable(fs, tableDir); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index ed175fa..4d33044 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -38,11 +38,12 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableInfoMissingException; -import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import com.google.common.annotations.VisibleForTesting; @@ -84,8 +85,8 @@ public class FSTableDescriptors implements TableDescriptors { // This 
cache does not age out the old stuff. Thinking is that the amount // of data we keep up in here is so small, no need to do occasional purge. // TODO. - private final Map cache = - new ConcurrentHashMap(); + private final Map cache = + new ConcurrentHashMap(); /** * Data structure to hold modification time and table descriptor. @@ -140,32 +141,20 @@ public class FSTableDescriptors implements TableDescriptors { * to see if a newer file has been created since the cached one was read. */ @Override - public HTableDescriptor get(final byte [] tablename) - throws IOException { - return get(Bytes.toString(tablename)); - } - - /** - * Get the current table descriptor for the given table, or null if none exists. - * - * Uses a local cache of the descriptor but still checks the filesystem on each call - * to see if a newer file has been created since the cached one was read. - */ - @Override - public HTableDescriptor get(final String tablename) + public HTableDescriptor get(final TableName tablename) throws IOException { invocations++; - if (HTableDescriptor.ROOT_TABLEDESC.getNameAsString().equals(tablename)) { + if (HTableDescriptor.ROOT_TABLEDESC.getTableName().equals(tablename)) { cachehits++; return HTableDescriptor.ROOT_TABLEDESC; } - if (HTableDescriptor.META_TABLEDESC.getNameAsString().equals(tablename)) { + if (HTableDescriptor.META_TABLEDESC.getTableName().equals(tablename)) { cachehits++; return HTableDescriptor.META_TABLEDESC; } // .META. and -ROOT- is already handled. If some one tries to get the descriptor for // .logs, .oldlogs or .corrupt throw an exception. - if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename)) { + if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) { throw new IOException("No descriptor found for non table = " + tablename); } @@ -212,14 +201,36 @@ public class FSTableDescriptors implements TableDescriptors { for (Path d: tableDirs) { HTableDescriptor htd = null; try { + htd = get(FSUtils.getTableName(d)); + } catch (FileNotFoundException fnfe) { + // inability of retrieving one HTD shouldn't stop getting the remaining + LOG.warn("Trouble retrieving htd", fnfe); + } + if (htd == null) continue; + htds.put(htd.getTableName().getNameAsString(), htd); + } + return htds; + } - htd = get(d.getName()); + /* (non-Javadoc) + * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path) + */ + @Override + public Map getByNamespace(String name) + throws IOException { + Map htds = new TreeMap(); + List tableDirs = + FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name)); + for (Path d: tableDirs) { + HTableDescriptor htd = null; + try { + htd = get(FSUtils.getTableName(d)); } catch (FileNotFoundException fnfe) { // inability of retrieving one HTD shouldn't stop getting the remaining LOG.warn("Trouble retrieving htd", fnfe); } if (htd == null) continue; - htds.put(d.getName(), htd); + htds.put(FSUtils.getTableName(d).getNameAsString(), htd); } return htds; } @@ -233,19 +244,16 @@ public class FSTableDescriptors implements TableDescriptors { if (fsreadonly) { throw new NotImplementedException("Cannot add a table descriptor - in read only mode"); } - if (Bytes.equals(HConstants.ROOT_TABLE_NAME, htd.getName())) { + if (TableName.META_TABLE_NAME.equals(htd.getTableName())) { throw new NotImplementedException(); } - if (Bytes.equals(HConstants.META_TABLE_NAME, htd.getName())) { - throw new NotImplementedException(); - } - if 
(HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) { + if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNameAsString())) { throw new NotImplementedException( "Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString()); } updateTableDescriptor(htd); - long modtime = getTableInfoModtime(htd.getNameAsString()); - this.cache.put(htd.getNameAsString(), new TableDescriptorAndModtime(modtime, htd)); + long modtime = getTableInfoModtime(htd.getTableName()); + this.cache.put(htd.getTableName(), new TableDescriptorAndModtime(modtime, htd)); } /** @@ -254,12 +262,12 @@ public class FSTableDescriptors implements TableDescriptors { * from the FileSystem. */ @Override - public HTableDescriptor remove(final String tablename) + public HTableDescriptor remove(final TableName tablename) throws IOException { if (fsreadonly) { throw new NotImplementedException("Cannot remove a table descriptor - in read only mode"); } - Path tabledir = getTableDirectory(tablename); + Path tabledir = getTableDir(tablename); if (this.fs.exists(tabledir)) { if (!this.fs.delete(tabledir, true)) { throw new IOException("Failed delete of " + tabledir.toString()); @@ -276,7 +284,7 @@ public class FSTableDescriptors implements TableDescriptors { * @return true if exists * @throws IOException */ - public boolean isTableInfoExists(String tableName) throws IOException { + public boolean isTableInfoExists(TableName tableName) throws IOException { return getTableInfoPath(tableName) != null; } @@ -284,8 +292,8 @@ public class FSTableDescriptors implements TableDescriptors { * Find the most current table info file for the given table in the hbase root directory. * @return The file status of the current table info file or null if it does not exist */ - private FileStatus getTableInfoPath(final String tableName) throws IOException { - Path tableDir = getTableDirectory(tableName); + private FileStatus getTableInfoPath(final TableName tableName) throws IOException { + Path tableDir = getTableDir(tableName); return getTableInfoPath(tableDir); } @@ -384,17 +392,10 @@ public class FSTableDescriptors implements TableDescriptors { /** * Return the table directory in HDFS */ - @VisibleForTesting Path getTableDirectory(final String tableName) { - return getTableDirectory(rootdir, tableName); + @VisibleForTesting Path getTableDir(final TableName tableName) { + return FSUtils.getTableDir(rootdir, tableName); } - - /** - * Return the table directory in HDFS - */ - static Path getTableDirectory(Path rootDir, String tableName) { - return FSUtils.getTablePath(rootDir, tableName); - } - + private static final PathFilter TABLEINFO_PATHFILTER = new PathFilter() { @Override public boolean accept(Path p) { @@ -460,7 +461,7 @@ public class FSTableDescriptors implements TableDescriptors { * or 0 if no tableinfo file found. * @throws IOException */ - private long getTableInfoModtime(final String tableName) throws IOException { + private long getTableInfoModtime(final TableName tableName) throws IOException { FileStatus status = getTableInfoPath(tableName); return status == null ? 0 : status.getModificationTime(); } @@ -471,8 +472,8 @@ public class FSTableDescriptors implements TableDescriptors { * Returns null if it's not found. 
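A minimal lookup sketch against the TableName-keyed FSTableDescriptors API, assuming an existing Configuration conf and a caller that may throw IOException; the namespace and table names are illustrative:

    // illustrative only: descriptor lookup is now keyed by TableName
    FSTableDescriptors fstd = new FSTableDescriptors(conf);
    HTableDescriptor htd = fstd.get(TableName.valueOf("ns1", "orders")); // null if none exists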
*/ public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, - Path hbaseRootDir, String tableName) throws IOException { - Path tableDir = getTableDirectory(hbaseRootDir, tableName); + Path hbaseRootDir, TableName tableName) throws IOException { + Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName); return getTableDescriptorFromFs(fs, tableDir); } @@ -490,14 +491,14 @@ public class FSTableDescriptors implements TableDescriptors { return readTableDescriptor(fs, status, false); } - private TableDescriptorAndModtime getTableDescriptorAndModtime(String tableName) + private TableDescriptorAndModtime getTableDescriptorAndModtime(TableName tableName) throws IOException { // ignore both -ROOT- and .META. tables - if (Bytes.compareTo(Bytes.toBytes(tableName), HConstants.ROOT_TABLE_NAME) == 0 - || Bytes.compareTo(Bytes.toBytes(tableName), HConstants.META_TABLE_NAME) == 0) { + if (tableName.equals(TableName.ROOT_TABLE_NAME) + || tableName.equals(TableName.META_TABLE_NAME)) { return null; } - return getTableDescriptorAndModtime(getTableDirectory(tableName)); + return getTableDescriptorAndModtime(getTableDir(tableName)); } private TableDescriptorAndModtime getTableDescriptorAndModtime(Path tableDir) @@ -545,7 +546,7 @@ public class FSTableDescriptors implements TableDescriptors { if (fsreadonly) { throw new NotImplementedException("Cannot update a table descriptor - in read only mode"); } - Path tableDir = getTableDirectory(htd.getNameAsString()); + Path tableDir = getTableDir(htd.getTableName()); Path p = writeTableDescriptor(fs, htd, tableDir, getTableInfoPath(tableDir)); if (p == null) throw new IOException("Failed update"); LOG.info("Updated tableinfo=" + p); @@ -557,12 +558,12 @@ public class FSTableDescriptors implements TableDescriptors { * Used in unit tests only. 
* @throws NotImplementedException if in read only mode */ - public void deleteTableDescriptorIfExists(String tableName) throws IOException { + public void deleteTableDescriptorIfExists(TableName tableName) throws IOException { if (fsreadonly) { throw new NotImplementedException("Cannot delete a table descriptor - in read only mode"); } - Path tableDir = getTableDirectory(tableName); + Path tableDir = getTableDir(tableName); Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR); deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE); } @@ -683,7 +684,7 @@ public class FSTableDescriptors implements TableDescriptors { */ public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation) throws IOException { - Path tableDir = getTableDirectory(htd.getNameAsString()); + Path tableDir = getTableDir(htd.getTableName()); return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index e357f29..8ff111a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -32,6 +32,7 @@ import java.net.URISyntaxException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.regex.Pattern; @@ -51,6 +52,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.ClusterId; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; @@ -927,17 +929,8 @@ public abstract class FSUtils { public static boolean isMajorCompacted(final FileSystem fs, final Path hbaseRootDir) throws IOException { - // Presumes any directory under hbase.rootdir is a table. - FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs)); - for (FileStatus tableDir : tableDirs) { - // Skip the .log directory. All others should be tables. Inside a table, - // there are compaction.dir directories to skip. Otherwise, all else - // should be regions. Then in each region, should only be family - // directories. Under each of these, should be one file only. - Path d = tableDir.getPath(); - if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { - continue; - } + List tableDirs = getTableDirs(fs, hbaseRootDir); + for (Path d : tableDirs) { FileStatus[] regionDirs = fs.listStatus(d, new DirFilter(fs)); for (FileStatus regionDir : regionDirs) { Path dd = regionDir.getPath(); @@ -1012,17 +1005,8 @@ public abstract class FSUtils { int cfCountTotal = 0; int cfFragTotal = 0; DirFilter df = new DirFilter(fs); - // presumes any directory under hbase.rootdir is a table - FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df); - for (FileStatus tableDir : tableDirs) { - // Skip the .log directory. All others should be tables. Inside a table, - // there are compaction.dir directories to skip. Otherwise, all else - // should be regions. Then in each region, should only be family - // directories. Under each of these, should be one file only. 
- Path d = tableDir.getPath(); - if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { - continue; - } + List tableDirs = getTableDirs(fs, hbaseRootDir); + for (Path d : tableDirs) { int cfCount = 0; int cfFrag = 0; FileStatus[] regionDirs = fs.listStatus(d, df); @@ -1046,7 +1030,8 @@ public abstract class FSUtils { } } // compute percentage per table and store in result list - frags.put(d.getName(), Math.round((float) cfFrag / cfCount * 100)); + frags.put(FSUtils.getTableName(d).getNameAsString(), + Math.round((float) cfFrag / cfCount * 100)); } // set overall percentage for all tables frags.put("-TOTAL-", Math.round((float) cfFragTotal / cfCountTotal * 100)); @@ -1083,13 +1068,12 @@ public abstract class FSUtils { final Path hbaseRootDir) throws IOException { // Presumes any directory under hbase.rootdir is a table. - FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs)); - for (FileStatus tableDir : tableDirs) { + List tableDirs = getTableDirs(fs, hbaseRootDir); + for (Path d: tableDirs) { // Inside a table, there are compaction.dir directories to skip. // Otherwise, all else should be regions. Then in each region, should // only be family directories. Under each of these, should be a mapfile // and info directory and in these only one file. - Path d = tableDir.getPath(); if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { continue; } @@ -1135,6 +1119,45 @@ public abstract class FSUtils { } /** + * Returns the {@link org.apache.hadoop.fs.Path} object representing the table directory under + * path rootdir + * + * @param rootdir qualified path of HBase root directory + * @param tableName name of table + * @return {@link org.apache.hadoop.fs.Path} for table + */ + public static Path getTableDir(Path rootdir, final TableName tableName) { + return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()), + tableName.getQualifierAsString()); + } + + /** + * Returns the {@link org.apache.hadoop.hbase.TableName} object representing + * the table directory under + * path rootdir + * + * @param rootdir qualified path of HBase root directory + * @param tablePath path of table + * @return {@link org.apache.hadoop.fs.Path} for table + */ + public static TableName getTableName(Path tablePath) { + return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName()); + } + + /** + * Returns the {@link org.apache.hadoop.fs.Path} object representing + * the namespace directory under path rootdir + * + * @param rootdir qualified path of HBase root directory + * @param namespace namespace name + * @return {@link org.apache.hadoop.fs.Path} for table + */ + public static Path getNamespaceDir(Path rootdir, final String namespace) { + return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR, + new Path(namespace))); + } + + /** * A {@link PathFilter} that returns only regular files. 
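A small sketch of the namespace-aware layout helpers added above: FSUtils.getTableDir places a table under rootdir/HConstants.BASE_NAMESPACE_DIR/namespace/qualifier, and FSUtils.getTableName reverses that mapping; the root path and table name are illustrative:

    // illustrative only
    Path rootDir = new Path("hdfs://nn/hbase");
    TableName tn = TableName.valueOf("ns1", "orders");
    Path tableDir = FSUtils.getTableDir(rootDir, tn);      // rootdir/<BASE_NAMESPACE_DIR>/ns1/orders
    TableName roundTrip = FSUtils.getTableName(tableDir);  // yields the same TableName again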
*/ static class FileFilter implements PathFilter { @@ -1210,7 +1233,7 @@ public abstract class FSUtils { public static class UserTableDirFilter extends BlackListDirFilter { public UserTableDirFilter(FileSystem fs) { - super(fs, HConstants.HBASE_NON_USER_TABLE_DIRS); + super(fs, HConstants.HBASE_NON_TABLE_DIRS); } } @@ -1269,15 +1292,27 @@ public abstract class FSUtils { public abstract void recoverFileLease(final FileSystem fs, final Path p, Configuration conf, CancelableProgressable reporter) throws IOException; + public static List getTableDirs(final FileSystem fs, final Path rootdir) + throws IOException { + List tableDirs = new LinkedList(); + + for(FileStatus status : + fs.globStatus(new Path(rootdir, + new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) { + tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath())); + } + return tableDirs; + } + /** * @param fs * @param rootdir * @return All the table directories under rootdir. Ignore non table hbase folders such as - * .logs, .oldlogs, .corrupt, .META., and -ROOT- folders. + * .logs, .oldlogs, .corrupt folders. * @throws IOException */ - public static List getTableDirs(final FileSystem fs, final Path rootdir) - throws IOException { + public static List getLocalTableDirs(final FileSystem fs, final Path rootdir) + throws IOException { // presumes any directory under hbase.rootdir is a table FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs)); List tabledirs = new ArrayList(dirs.length); @@ -1287,14 +1322,6 @@ public abstract class FSUtils { return tabledirs; } - public static Path getTablePath(Path rootdir, byte [] tableName) { - return getTablePath(rootdir, Bytes.toString(tableName)); - } - - public static Path getTablePath(Path rootdir, final String tableName) { - return new Path(rootdir, tableName); - } - /** * Filter for all dirs that don't start with '.' */ @@ -1444,19 +1471,19 @@ public abstract class FSUtils { * @param map map to add values. If null, this method will create and populate one to return * @param fs The file system to use. * @param hbaseRootDir The root directory to scan. - * @param tablename name of the table to scan. + * @param tableName name of the table to scan. * @return Map keyed by StoreFile name with a value of the full Path. * @throws IOException When scanning the directory fails. */ public static Map getTableStoreFilePathMap(Map map, - final FileSystem fs, final Path hbaseRootDir, byte[] tablename) + final FileSystem fs, final Path hbaseRootDir, TableName tableName) throws IOException { if (map == null) { map = new HashMap(); } // only include the directory paths to tables - Path tableDir = new Path(hbaseRootDir, Bytes.toString(tablename)); + Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName); // Inside a table, there are compaction.dir directories to skip. Otherwise, all else // should be regions. PathFilter df = new BlackListDirFilter(fs, HConstants.HBASE_NON_TABLE_DIRS); @@ -1505,13 +1532,11 @@ public abstract class FSUtils { // it was borrowed from it. 
// only include the directory paths to tables - PathFilter df = new BlackListDirFilter(fs, HConstants.HBASE_NON_TABLE_DIRS); - FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df); - for (FileStatus tableDir : tableDirs) { - byte[] tablename = Bytes.toBytes(tableDir.getPath().getName()); - getTableStoreFilePathMap(map, fs, hbaseRootDir, tablename); + for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) { + getTableStoreFilePathMap(map, fs, hbaseRootDir, + FSUtils.getTableName(tableDir)); } - return map; + return map; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index a750c93..f40d23c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -57,6 +57,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -202,7 +203,7 @@ public class HBaseFsck extends Configured implements Tool { // limit checking/fixes to listed tables, if empty attempt to check/fix all // .META. are always checked - private Set tablesIncluded = new HashSet(); + private Set tablesIncluded = new HashSet(); private int maxMerge = DEFAULT_MAX_MERGE; // maximum number of overlapping regions to merge private int maxOverlapsToSideline = DEFAULT_OVERLAPS_TO_SIDELINE; // maximum number of overlapping regions to sideline private boolean sidelineBigOverlaps = false; // sideline overlaps with >maxMerge regions @@ -225,8 +226,8 @@ public class HBaseFsck extends Configured implements Tool { * to detect and correct consistency (hdfs/meta/deployment) problems. */ private TreeMap regionInfoMap = new TreeMap(); - private TreeSet disabledTables = - new TreeSet(Bytes.BYTES_COMPARATOR); + private TreeSet disabledTables = + new TreeSet(); // Empty regioninfo qualifiers in .META. private Set emptyRegionInfoQualifiers = new HashSet(); @@ -240,14 +241,16 @@ public class HBaseFsck extends Configured implements Tool { * unless checkMetaOnly is specified, in which case, it contains only * the meta table */ - private SortedMap tablesInfo = new ConcurrentSkipListMap(); + private SortedMap tablesInfo = + new ConcurrentSkipListMap(); /** * When initially looking at HDFS, we attempt to find any orphaned data. 
*/ private List orphanHdfsDirs = Collections.synchronizedList(new ArrayList()); - private Map> orphanTableDirs = new HashMap>(); + private Map> orphanTableDirs = + new HashMap>(); /** * Constructor @@ -288,7 +291,7 @@ public class HBaseFsck extends Configured implements Tool { */ public void connect() throws IOException { admin = new HBaseAdmin(getConf()); - meta = new HTable(getConf(), HConstants.META_TABLE_NAME); + meta = new HTable(getConf(), TableName.META_TABLE_NAME); status = admin.getClusterStatus(); connection = admin.getConnection(); } @@ -503,7 +506,7 @@ public class HBaseFsck extends Configured implements Tool { return ; } - String tableName = Bytes.toString(hi.getTableName()); + TableName tableName = hi.getTableName(); TableInfo tableInfo = tablesInfo.get(tableName); Preconditions.checkNotNull(tableInfo, "Table '" + tableName + "' not present!"); HTableDescriptor template = tableInfo.getHTD(); @@ -566,7 +569,7 @@ public class HBaseFsck extends Configured implements Tool { Bytes.toString(orphanRegionRange.getSecond()) + ")"); // create new region on hdfs. move data into place. - HRegionInfo hri = new HRegionInfo(template.getName(), orphanRegionRange.getFirst(), orphanRegionRange.getSecond()); + HRegionInfo hri = new HRegionInfo(template.getTableName(), orphanRegionRange.getFirst(), orphanRegionRange.getSecond()); LOG.info("Creating new region : " + hri); HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template); Path target = region.getRegionFileSystem().getRegionDir(); @@ -710,8 +713,7 @@ public class HBaseFsck extends Configured implements Tool { errors.detail("Number of Tables in flux: " + numSkipped.get()); } for (HTableDescriptor td : allTables) { - String tableName = td.getNameAsString(); - errors.detail(" Table: " + tableName + "\t" + + errors.detail(" Table: " + td.getTableName() + "\t" + (td.isReadOnly() ? "ro" : "rw") + "\t" + (td.isMetaRegion() ? "META" : " ") + "\t" + " families: " + td.getFamilies().size()); @@ -761,7 +763,8 @@ public class HBaseFsck extends Configured implements Tool { /** * Populate hbi's from regionInfos loaded from file system. */ - private SortedMap loadHdfsRegionInfos() throws IOException, InterruptedException { + private SortedMap loadHdfsRegionInfos() + throws IOException, InterruptedException { tablesInfo.clear(); // regenerating the data // generate region split structure Collection hbckInfos = regionInfoMap.values(); @@ -799,7 +802,7 @@ public class HBaseFsck extends Configured implements Tool { // get table name from hdfs, populate various HBaseFsck tables. - String tableName = Bytes.toString(hbi.getTableName()); + TableName tableName = hbi.getTableName(); if (tableName == null) { // There was an entry in META not in the HDFS? LOG.warn("tableName was null for: " + hbi); @@ -861,7 +864,7 @@ public class HBaseFsck extends Configured implements Tool { * 3. the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}
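A minimal sketch of building a descriptor directly from a TableName, matching the pattern used by the fabricated-table path below; the namespace, table and family names are illustrative:

    // illustrative only
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("ns1", "orders"));
    htd.addFamily(new HColumnDescriptor("d"));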
* @throws IOException */ - private boolean fabricateTableInfo(FSTableDescriptors fstd, String tableName, + private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName, Set columns) throws IOException { if (columns ==null || columns.isEmpty()) return false; HTableDescriptor htd = new HTableDescriptor(tableName); @@ -899,19 +902,21 @@ public class HBaseFsck extends Configured implements Tool { public void fixOrphanTables() throws IOException { if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) { - List tmpList = new ArrayList(); + List tmpList = new ArrayList(); tmpList.addAll(orphanTableDirs.keySet()); HTableDescriptor[] htds = getHTableDescriptors(tmpList); - Iterator>> iter = orphanTableDirs.entrySet().iterator(); + Iterator>> iter = + orphanTableDirs.entrySet().iterator(); int j = 0; int numFailedCase = 0; FSTableDescriptors fstd = new FSTableDescriptors(getConf()); while (iter.hasNext()) { - Entry> entry = (Entry>) iter.next(); - String tableName = entry.getKey(); + Entry> entry = + (Entry>) iter.next(); + TableName tableName = entry.getKey(); LOG.info("Trying to fix orphan table error: " + tableName); if (j < htds.length) { - if (tableName.equals(Bytes.toString(htds[j].getName()))) { + if (tableName.equals(htds[j].getTableName())) { HTableDescriptor htd = htds[j]; LOG.info("fixing orphan table: " + tableName + " from cache"); fstd.createTableDescriptor(htd, true); @@ -969,14 +974,15 @@ public class HBaseFsck extends Configured implements Tool { * * @return An array list of puts to do in bulk, null if tables have problems */ - private ArrayList generatePuts(SortedMap tablesInfo) throws IOException { + private ArrayList generatePuts( + SortedMap tablesInfo) throws IOException { ArrayList puts = new ArrayList(); boolean hasProblems = false; - for (Entry e : tablesInfo.entrySet()) { - String name = e.getKey(); + for (Entry e : tablesInfo.entrySet()) { + TableName name = e.getKey(); // skip ".META." 
- if (Bytes.compareTo(Bytes.toBytes(name), HConstants.META_TABLE_NAME) == 0) { + if (name.compareTo(TableName.META_TABLE_NAME) == 0) { continue; } @@ -1006,7 +1012,8 @@ public class HBaseFsck extends Configured implements Tool { /** * Suggest fixes for each table */ - private void suggestFixes(SortedMap tablesInfo) throws IOException { + private void suggestFixes( + SortedMap tablesInfo) throws IOException { for (TableInfo tInfo : tablesInfo.values()) { TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors); tInfo.checkRegionChain(handler); @@ -1077,7 +1084,7 @@ public class HBaseFsck extends Configured implements Tool { return true; } - private SortedMap checkHdfsIntegrity(boolean fixHoles, + private SortedMap checkHdfsIntegrity(boolean fixHoles, boolean fixOverlaps) throws IOException { LOG.info("Checking HBase region split map from HDFS data..."); for (TableInfo tInfo : tablesInfo.values()) { @@ -1123,7 +1130,7 @@ public class HBaseFsck extends Configured implements Tool { */ Path sidelineRegionDir(FileSystem fs, String parentDir, HbckInfo hi) throws IOException { - String tableName = Bytes.toString(hi.getTableName()); + TableName tableName = hi.getTableName(); Path regionDir = hi.getHdfsRegionDir(); if (!fs.exists(regionDir)) { @@ -1135,7 +1142,7 @@ public class HBaseFsck extends Configured implements Tool { if (parentDir != null) { rootDir = new Path(rootDir, parentDir); } - Path sidelineTableDir= new Path(rootDir, tableName); + Path sidelineTableDir= FSUtils.getTableDir(rootDir, tableName); Path sidelineRegionDir = new Path(sidelineTableDir, regionDir.getName()); fs.mkdirs(sidelineRegionDir); boolean success = false; @@ -1194,16 +1201,16 @@ public class HBaseFsck extends Configured implements Tool { /** * Side line an entire table. */ - void sidelineTable(FileSystem fs, byte[] table, Path hbaseDir, + void sidelineTable(FileSystem fs, TableName tableName, Path hbaseDir, Path backupHbaseDir) throws IOException { - String tableName = Bytes.toString(table); - Path tableDir = new Path(hbaseDir, tableName); + Path tableDir = FSUtils.getTableDir(hbaseDir, tableName); if (fs.exists(tableDir)) { - Path backupTableDir= new Path(backupHbaseDir, tableName); + Path backupTableDir= FSUtils.getTableDir(backupHbaseDir, tableName); + fs.mkdirs(backupTableDir.getParent()); boolean success = fs.rename(tableDir, backupTableDir); if (!success) { throw new IOException("Failed to move " + tableName + " from " - + tableDir.getName() + " to " + backupTableDir.getName()); + + tableDir + " to " + backupTableDir); } } else { LOG.info("No previous " + tableName + " exists. Continuing."); @@ -1221,7 +1228,7 @@ public class HBaseFsck extends Configured implements Tool { fs.mkdirs(backupDir); try { - sidelineTable(fs, HConstants.META_TABLE_NAME, hbaseDir, backupDir); + sidelineTable(fs, TableName.META_TABLE_NAME, hbaseDir, backupDir); } catch (IOException e) { LOG.fatal("... failed to sideline meta. Currently in inconsistent state. To restore " + "try to rename .META. 
in " + backupDir.getName() + " to " @@ -1243,8 +1250,9 @@ public class HBaseFsck extends Configured implements Tool { public Void connect(HConnection connection) throws IOException { ZooKeeperWatcher zkw = createZooKeeperWatcher(); try { - for (String tableName : ZKTableReadOnly.getDisabledOrDisablingTables(zkw)) { - disabledTables.add(Bytes.toBytes(tableName)); + for (TableName tableName : + ZKTableReadOnly.getDisabledOrDisablingTables(zkw)) { + disabledTables.add(tableName); } } catch (KeeperException ke) { throw new IOException(ke); @@ -1274,18 +1282,16 @@ public class HBaseFsck extends Configured implements Tool { // list all tables from HDFS List tableDirs = Lists.newArrayList(); - boolean foundVersionFile = false; - FileStatus[] files = fs.listStatus(rootDir); - for (FileStatus file : files) { - String dirName = file.getPath().getName(); - if (dirName.equals(HConstants.VERSION_FILE_NAME)) { - foundVersionFile = true; - } else { - if ((!checkMetaOnly && isTableIncluded(dirName)) || - dirName.equals(".META.")) { - tableDirs.add(file); - } - } + boolean foundVersionFile = fs.exists(new Path(rootDir, HConstants.VERSION_FILE_NAME)); + + List paths = FSUtils.getTableDirs(fs, rootDir); + for (Path path : paths) { + TableName tableName = FSUtils.getTableName(path); + if ((!checkMetaOnly && + isTableIncluded(tableName)) || + tableName.equals(TableName.META_TABLE_NAME)) { + tableDirs.add(fs.getFileStatus(path)); + } } // verify that version file exists @@ -1329,7 +1335,7 @@ public class HBaseFsck extends Configured implements Tool { */ private boolean recordMetaRegion() throws IOException { HRegionLocation metaLocation = connection.locateRegion( - HConstants.META_TABLE_NAME, HConstants.EMPTY_START_ROW); + TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW); // Check if Meta region is valid and existing if (metaLocation == null || metaLocation.getRegionInfo() == null || @@ -1774,8 +1780,8 @@ public class HBaseFsck extends Configured implements Tool { * repeated or overlapping ones. * @throws IOException */ - SortedMap checkIntegrity() throws IOException { - tablesInfo = new TreeMap (); + SortedMap checkIntegrity() throws IOException { + tablesInfo = new TreeMap (); List noHDFSRegionInfos = new ArrayList(); LOG.debug("There are " + regionInfoMap.size() + " region info entries"); for (HbckInfo hbi : regionInfoMap.values()) { @@ -1812,7 +1818,7 @@ public class HBaseFsck extends Configured implements Tool { if (hbi.deployedOn.size() == 0) continue; // We should be safe here - String tableName = hbi.metaEntry.getTableNameAsString(); + TableName tableName = hbi.metaEntry.getTableName(); TableInfo modTInfo = tablesInfo.get(tableName); if (modTInfo == null) { modTInfo = new TableInfo(tableName); @@ -1902,7 +1908,7 @@ public class HBaseFsck extends Configured implements Tool { * Maintain information about a particular table. 
*/ public class TableInfo { - String tableName; + TableName tableName; TreeSet deployedOn; // backwards regions @@ -1921,7 +1927,7 @@ public class HBaseFsck extends Configured implements Tool { final Multimap overlapGroups = TreeMultimap.create(RegionSplitCalculator.BYTES_COMPARATOR, cmp); - TableInfo(String name) { + TableInfo(TableName name) { this.tableName = name; deployedOn = new TreeSet (); } @@ -1966,7 +1972,7 @@ public class HBaseFsck extends Configured implements Tool { this.deployedOn.add(server); } - public String getName() { + public TableName getName() { return tableName; } @@ -2071,7 +2077,7 @@ public class HBaseFsck extends Configured implements Tool { getTableInfo(), next); HTableDescriptor htd = getTableInfo().getHTD(); // from special EMPTY_START_ROW to next region's startKey - HRegionInfo newRegion = new HRegionInfo(htd.getName(), + HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, next.getStartKey()); // TODO test @@ -2087,7 +2093,7 @@ public class HBaseFsck extends Configured implements Tool { + "region and regioninfo in HDFS to plug the hole.", getTableInfo()); HTableDescriptor htd = getTableInfo().getHTD(); // from curEndKey to EMPTY_START_ROW - HRegionInfo newRegion = new HRegionInfo(htd.getName(), curEndKey, + HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), curEndKey, HConstants.EMPTY_START_ROW); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); @@ -2109,7 +2115,7 @@ public class HBaseFsck extends Configured implements Tool { + ". Creating a new regioninfo and region " + "dir in hdfs to plug the hole."); HTableDescriptor htd = getTableInfo().getHTD(); - HRegionInfo newRegion = new HRegionInfo(htd.getName(), holeStartKey, holeStopKey); + HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), holeStartKey, holeStopKey); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); LOG.info("Plugged hold by creating new empty region: "+ newRegion + " " +region); fixes++; @@ -2194,7 +2200,7 @@ public class HBaseFsck extends Configured implements Tool { // create new empty container region. HTableDescriptor htd = getTableInfo().getHTD(); // from start key to end Key - HRegionInfo newRegion = new HRegionInfo(htd.getName(), range.getFirst(), + HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), range.getFirst(), range.getSecond()); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); LOG.info("Created new empty container region: " + @@ -2275,7 +2281,7 @@ public class HBaseFsck extends Configured implements Tool { // When table is disabled no need to check for the region chain. Some of the regions // accidently if deployed, this below code might report some issues like missing start // or end regions or region hole in chain and may try to fix which is unwanted. 
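As a rough illustration of the constructor change these handlers rely on, a region that plugs a hole is now built from the descriptor's TableName rather than a byte[] table name (the keys below are made up):

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    final class HolePlugSketch {
      // Build the HRegionInfo covering a hole in the region chain; hbck would
      // hand this to HBaseFsckRepair.createHDFSRegionDir() as in the hunks above.
      static HRegionInfo regionForHole(HTableDescriptor htd) {
        byte[] holeStart = Bytes.toBytes("row-aaa");
        byte[] holeStop = Bytes.toBytes("row-mmm");
        return new HRegionInfo(htd.getTableName(), holeStart, holeStop);
      }
    }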
- if (disabledTables.contains(this.tableName.getBytes())) { + if (disabledTables.contains(this.tableName)) { return true; } int originalErrorsCount = errors.getErrorList().size(); @@ -2416,7 +2422,7 @@ public class HBaseFsck extends Configured implements Tool { public void dumpSidelinedRegions(Map regions) { for (Map.Entry entry: regions.entrySet()) { - String tableName = Bytes.toStringBinary(entry.getValue().getTableName()); + TableName tableName = entry.getValue().getTableName(); Path path = entry.getKey(); errors.print("This sidelined region dir should be bulk loaded: " + path.toString()); @@ -2427,7 +2433,7 @@ public class HBaseFsck extends Configured implements Tool { } public Multimap getOverlapGroups( - String table) { + TableName table) { TableInfo ti = tablesInfo.get(table); return ti.overlapGroups; } @@ -2442,7 +2448,7 @@ public class HBaseFsck extends Configured implements Tool { * @throws IOException if an error is encountered */ HTableDescriptor[] getTables(AtomicInteger numSkipped) { - List tableNames = new ArrayList(); + List tableNames = new ArrayList(); long now = System.currentTimeMillis(); for (HbckInfo hbi : regionInfoMap.values()) { @@ -2452,7 +2458,7 @@ public class HBaseFsck extends Configured implements Tool { // pick only those tables that were not modified in the last few milliseconds. if (info != null && info.getStartKey().length == 0 && !info.isMetaRegion()) { if (info.modTime + timelag < now) { - tableNames.add(info.getTableNameAsString()); + tableNames.add(info.getTableName()); } else { numSkipped.incrementAndGet(); // one more in-flux table } @@ -2461,11 +2467,11 @@ public class HBaseFsck extends Configured implements Tool { return getHTableDescriptors(tableNames); } - HTableDescriptor[] getHTableDescriptors(List tableNames) { + HTableDescriptor[] getHTableDescriptors(List tableNames) { HTableDescriptor[] htd = new HTableDescriptor[0]; try { LOG.info("getHTableDescriptors == tableNames => " + tableNames); - htd = new HBaseAdmin(getConf()).getTableDescriptors(tableNames); + htd = new HBaseAdmin(getConf()).getTableDescriptorsByTableName(tableNames); } catch (IOException e) { LOG.debug("Exception getting table descriptors", e); } @@ -2576,7 +2582,7 @@ public class HBaseFsck extends Configured implements Tool { sn = pair.getSecond(); } HRegionInfo hri = pair.getFirst(); - if (!(isTableIncluded(hri.getTableNameAsString()) + if (!(isTableIncluded(hri.getTableName()) || hri.isMetaRegion())) { return true; } @@ -2653,7 +2659,7 @@ public class HBaseFsck extends Configured implements Tool { hash ^= Arrays.hashCode(getStartKey()); hash ^= Arrays.hashCode(getEndKey()); hash ^= Boolean.valueOf(isOffline()).hashCode(); - hash ^= Arrays.hashCode(getTableName()); + hash ^= getTableName().hashCode(); if (regionServer != null) { hash ^= regionServer.hashCode(); } @@ -2742,14 +2748,14 @@ public class HBaseFsck extends Configured implements Tool { } } - public byte[] getTableName() { + public TableName getTableName() { if (this.metaEntry != null) { return this.metaEntry.getTableName(); } else if (this.hdfsEntry != null) { // we are only guaranteed to have a path and not an HRI for hdfsEntry, // so we get the name from the Path Path tableDir = this.hdfsEntry.hdfsRegionDir.getParent(); - return Bytes.toBytes(tableDir.getName()); + return FSUtils.getTableName(tableDir); } else { // Currently no code exercises this path, but we could add one for // getting table name from OnlineEntry @@ -2830,8 +2836,7 @@ public class HBaseFsck extends Configured implements Tool { return 0; } - int 
tableCompare = RegionSplitCalculator.BYTES_COMPARATOR.compare( - l.getTableName(), r.getTableName()); + int tableCompare = l.getTableName().compareTo(r.getTableName()); if (tableCompare != 0) { return tableCompare; } @@ -2874,7 +2879,7 @@ public class HBaseFsck extends Configured implements Tool { /** * Prints summary of all tables found on the system. */ - private void printTableSummary(SortedMap tablesInfo) { + private void printTableSummary(SortedMap tablesInfo) { StringBuilder sb = new StringBuilder(); errors.print("Summary:"); for (TableInfo tInfo : tablesInfo.values()) { @@ -3101,7 +3106,7 @@ public class HBaseFsck extends Configured implements Tool { List ret = Lists.newArrayList(); for (HRegionInfo hri : regions) { if (hri.isMetaTable() || (!hbck.checkMetaOnly - && hbck.isTableIncluded(hri.getTableNameAsString()))) { + && hbck.isTableIncluded(hri.getTableName()))) { ret.add(hri); } } @@ -3130,12 +3135,6 @@ public class HBaseFsck extends Configured implements Tool { @Override public synchronized Void call() throws IOException { try { - String tableName = tableDir.getPath().getName(); - // ignore hidden files - if (tableName.startsWith(".") && - !tableName.equals( Bytes.toString(HConstants.META_TABLE_NAME))) { - return null; - } // level 2: /
/* FileStatus[] regionDirs = fs.listStatus(tableDir.getPath()); for (FileStatus regionDir : regionDirs) { @@ -3209,7 +3208,7 @@ public class HBaseFsck extends Configured implements Tool { hbck.loadHdfsRegioninfo(hbi); } catch (IOException ioe) { String msg = "Orphan region in HDFS: Unable to load .regioninfo from table " - + Bytes.toString(hbi.getTableName()) + " in hdfs dir " + + hbi.getTableName() + " in hdfs dir " + hbi.getHdfsRegionDir() + "! It may be an invalid format or version file. Treating as " + "an orphaned regiondir."; @@ -3405,16 +3404,16 @@ public class HBaseFsck extends Configured implements Tool { * Only check/fix tables specified by the list, * Empty list means all tables are included. */ - boolean isTableIncluded(String table) { + boolean isTableIncluded(TableName table) { return (tablesIncluded.size() == 0) || tablesIncluded.contains(table); } - public void includeTable(String table) { + public void includeTable(TableName table) { tablesIncluded.add(table); } - Set getIncludedTables() { - return new HashSet(tablesIncluded); + Set getIncludedTables() { + return new HashSet(tablesIncluded); } /** @@ -3681,7 +3680,7 @@ public class HBaseFsck extends Configured implements Tool { errors.reportError(ERROR_CODE.WRONG_USAGE, "Unrecognized option:" + cmd); return printUsageAndExit(); } else { - includeTable(cmd); + includeTable(TableName.valueOf(cmd)); errors.print("Allow checking/fixes for table: " + cmd); } } @@ -3703,12 +3702,12 @@ public class HBaseFsck extends Configured implements Tool { LOG.info("Checking all hfiles for corruption"); HFileCorruptionChecker hfcc = createHFileCorruptionChecker(sidelineCorruptHFiles); setHFileCorruptionChecker(hfcc); // so we can get result - Collection tables = getIncludedTables(); + Collection tables = getIncludedTables(); Collection tableDirs = new ArrayList(); Path rootdir = FSUtils.getRootDir(getConf()); if (tables.size() > 0) { - for (String t : tables) { - tableDirs.add(FSUtils.getTablePath(rootdir, t)); + for (TableName t : tables) { + tableDirs.add(FSUtils.getTableDir(rootdir, t)); } } else { tableDirs = FSUtils.getTableDirs(FSUtils.getCurrentFileSystem(getConf()), rootdir); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java index 695ea02..57b02ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java @@ -28,10 +28,10 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.catalog.MetaEditor; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -177,7 +177,7 @@ public class HBaseFsckRepair { */ public static void fixMetaHoleOnline(Configuration conf, HRegionInfo hri) throws IOException { - HTable meta = new HTable(conf, HConstants.META_TABLE_NAME); + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); MetaEditor.addRegionToMeta(meta, hri); meta.close(); } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java index ac6929f..707aecb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java @@ -19,11 +19,10 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -46,7 +45,8 @@ public class HFileArchiveUtil { * @return {@link Path} to the directory to archive the given store or * null if it should not be archived */ - public static Path getStoreArchivePath(final Configuration conf, final String tableName, + public static Path getStoreArchivePath(final Configuration conf, + final TableName tableName, final String regionName, final String familyName) throws IOException { Path tableArchiveDir = getTableArchivePath(conf, tableName); return HStore.getStoreHomedir(tableArchiveDir, regionName, Bytes.toBytes(familyName)); @@ -54,29 +54,36 @@ public class HFileArchiveUtil { /** * Get the directory to archive a store directory - * @param conf {@link Configuration} to read for the archive directory name. Can be null. + * @param conf {@link Configuration} to read for the archive directory name. * @param region parent region information under which the store currently lives * @param tabledir directory for the table under which the store currently lives * @param family name of the family in the store * @return {@link Path} to the directory to archive the given store or null if it should * not be archived */ - public static Path getStoreArchivePath(Configuration conf, HRegionInfo region, Path tabledir, - byte[] family) { - Path tableArchiveDir = getTableArchivePath(tabledir); + public static Path getStoreArchivePath(Configuration conf, + HRegionInfo region, + Path tabledir, + byte[] family) throws IOException { + TableName tableName = + FSUtils.getTableName(tabledir); + Path rootDir = FSUtils.getRootDir(conf); + Path tableArchiveDir = getTableArchivePath(rootDir, tableName); return HStore.getStoreHomedir(tableArchiveDir, region, family); } /** * Get the archive directory for a given region under the specified table - * @param tabledir the original table directory. Cannot be null. + * @param tableName the table name. Cannot be null. * @param regiondir the path to the region directory. Cannot be null. 
* @return {@link Path} to the directory to archive the given region, or null if it * should not be archived */ - public static Path getRegionArchiveDir(Path tabledir, Path regiondir) { + public static Path getRegionArchiveDir(Path rootDir, + TableName tableName, + Path regiondir) { // get the archive directory for a table - Path archiveDir = getTableArchivePath(tabledir); + Path archiveDir = getTableArchivePath(rootDir, tableName); // then add on the region path under the archive String encodedRegionName = regiondir.getName(); @@ -85,19 +92,16 @@ public class HFileArchiveUtil { /** * Get the archive directory for a given region under the specified table - * @param rootdir {@link Path} to the root directory where hbase files are stored (for building + * @param rootDir {@link Path} to the root directory where hbase files are stored (for building * the archive path) - * @param tabledir the original table directory. Cannot be null. - * @param regiondir the path to the region directory. Cannot be null. + * @param tableName name of the table to archive. Cannot be null. * @return {@link Path} to the directory to archive the given region, or null if it * should not be archived */ - public static Path getRegionArchiveDir(Path rootdir, Path tabledir, Path regiondir) { + public static Path getRegionArchiveDir(Path rootDir, + TableName tableName, String encodedRegionName) { // get the archive directory for a table - Path archiveDir = getTableArchivePath(rootdir, tabledir.getName()); - - // then add on the region path under the archive - String encodedRegionName = regiondir.getName(); + Path archiveDir = getTableArchivePath(rootDir, tableName); return HRegion.getRegionDir(archiveDir, encodedRegionName); } @@ -107,27 +111,13 @@ public class HFileArchiveUtil { * Get the path to the table's archive directory. *
<p>
* Generally of the form: /hbase/.archive/[tablename] - * @param tabledir directory of the table to be archived. Cannot be null. - * @return {@link Path} to the archive directory for the table - */ - public static Path getTableArchivePath(Path tabledir) { - Path root = tabledir.getParent(); - return getTableArchivePath(root, tabledir.getName()); - } - - /** - * Get the path to the table archive directory based on the configured archive directory. - *
<p>
- * Get the path to the table's archive directory. - *
<p>
- * Generally of the form: /hbase/.archive/[tablename] * @param rootdir {@link Path} to the root directory where hbase files are stored (for building * the archive path) * @param tableName Name of the table to be archived. Cannot be null. * @return {@link Path} to the archive directory for the table */ - public static Path getTableArchivePath(final Path rootdir, final String tableName) { - return new Path(getArchivePath(rootdir), tableName); + public static Path getTableArchivePath(final Path rootdir, final TableName tableName) { + return FSUtils.getTableDir(getArchivePath(rootdir), tableName); } /** @@ -138,9 +128,10 @@ public class HFileArchiveUtil { * @param tableName Name of the table to be archived. Cannot be null. * @return {@link Path} to the archive directory for the table */ - public static Path getTableArchivePath(final Configuration conf, final String tableName) + public static Path getTableArchivePath(final Configuration conf, + final TableName tableName) throws IOException { - return new Path(getArchivePath(conf), tableName); + return FSUtils.getTableDir(getArchivePath(conf), tableName); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java index 5a43370..1465cf4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java @@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; @@ -79,7 +80,7 @@ class HMerge { * @throws IOException */ public static void merge(Configuration conf, FileSystem fs, - final byte [] tableName) + final TableName tableName) throws IOException { merge(conf, fs, tableName, true); } @@ -100,7 +101,7 @@ class HMerge { * @throws IOException */ public static void merge(Configuration conf, FileSystem fs, - final byte [] tableName, final boolean testMasterRunning) + final TableName tableName, final boolean testMasterRunning) throws IOException { boolean masterIsRunning = false; if (testMasterRunning) { @@ -112,7 +113,7 @@ class HMerge { } }); } - if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { + if (tableName.equals(TableName.META_TABLE_NAME)) { if (masterIsRunning) { throw new IllegalStateException( "Can not compact META table if instance is on-line"); @@ -140,7 +141,7 @@ class HMerge { private final long maxFilesize; - protected Merger(Configuration conf, FileSystem fs, final byte [] tableName) + protected Merger(Configuration conf, FileSystem fs, final TableName tableName) throws IOException { this.conf = conf; this.fs = fs; @@ -148,7 +149,7 @@ class HMerge { HConstants.DEFAULT_MAX_FILE_SIZE); this.rootDir = FSUtils.getRootDir(conf); - Path tabledir = HTableDescriptor.getTableDir(this.rootDir, tableName); + Path tabledir = FSUtils.getTableDir(this.rootDir, tableName); this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir); String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME; @@ -225,17 +226,17 @@ class HMerge { /** Instantiated to compact a normal user table */ private static class OnlineMerger extends Merger { - private final byte [] tableName; + private final TableName tableName; 
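Taken together with the HFileArchiveUtil changes above, both a table's live directory and its archive directory are now derived from a TableName; a brief sketch with a hypothetical table name:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSUtils;
    import org.apache.hadoop.hbase.util.HFileArchiveUtil;

    final class ArchivePathSketch {
      static void show(Configuration conf) throws IOException {
        TableName tableName = TableName.valueOf("example_table");  // hypothetical
        Path rootDir = FSUtils.getRootDir(conf);
        Path tableDir = FSUtils.getTableDir(rootDir, tableName);                     // live store files
        Path archiveDir = HFileArchiveUtil.getTableArchivePath(rootDir, tableName);  // archived store files
        System.out.println(tableDir + " -> " + archiveDir);
      }
    }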
private final HTable table; private final ResultScanner metaScanner; private HRegionInfo latestRegion; OnlineMerger(Configuration conf, FileSystem fs, - final byte [] tableName) + final TableName tableName) throws IOException { super(conf, fs, tableName); this.tableName = tableName; - this.table = new HTable(conf, HConstants.META_TABLE_NAME); + this.table = new HTable(conf, TableName.META_TABLE_NAME); this.metaScanner = table.getScanner(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); this.latestRegion = null; @@ -253,7 +254,7 @@ class HMerge { Bytes.toString(HConstants.CATALOG_FAMILY) + ":" + Bytes.toString(HConstants.REGIONINFO_QUALIFIER)); } - if (!Bytes.equals(region.getTableName(), this.tableName)) { + if (!region.getTableName().equals(this.tableName)) { return null; } return region; @@ -281,6 +282,11 @@ class HMerge { currentRow = metaScanner.next(); continue; } + HRegionInfo region = HRegionInfo.getHRegionInfo(currentRow); + if (!region.getTableName().equals(this.tableName)) { + currentRow = metaScanner.next(); + continue; + } foundResult = true; break; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java index 3ade888..23693b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -58,7 +59,7 @@ public class Merge extends Configured implements Tool { static final Log LOG = LogFactory.getLog(Merge.class); private Path rootdir; private volatile MetaUtils utils; - private byte [] tableName; // Name of table + private TableName tableName; // Name of table private volatile byte [] region1; // Name of region 1 private volatile byte [] region2; // Name of region 2 private volatile HRegionInfo mergeInfo; @@ -131,7 +132,7 @@ public class Merge extends Configured implements Tool { */ private void mergeTwoRegions() throws IOException { LOG.info("Merging regions " + Bytes.toStringBinary(this.region1) + " and " + - Bytes.toStringBinary(this.region2) + " in table " + Bytes.toString(this.tableName)); + Bytes.toStringBinary(this.region2) + " in table " + this.tableName); HRegion meta = this.utils.getMetaRegion(); Get get = new Get(region1); get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); @@ -153,7 +154,7 @@ public class Merge extends Configured implements Tool { throw new NullPointerException("info2 is null using key " + meta); } HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()), - this.rootdir, Bytes.toString(this.tableName)); + this.rootdir, this.tableName); HRegion merged = merge(htd, meta, info1, info2); LOG.info("Adding " + merged.getRegionInfo() + " to " + @@ -244,7 +245,7 @@ public class Merge extends Configured implements Tool { usage(); return -1; } - tableName = Bytes.toBytes(remainingArgs[0]); + tableName = TableName.valueOf(remainingArgs[0]); region1 = Bytes.toBytesBinary(remainingArgs[1]); region2 = Bytes.toBytesBinary(remainingArgs[2]); @@ -258,10 +259,11 @@ public class Merge extends Configured implements Tool { return status; } - private boolean 
notInTable(final byte [] tn, final byte [] rn) { - if (WritableComparator.compareBytes(tn, 0, tn.length, rn, 0, tn.length) != 0) { + private boolean notInTable(final TableName tn, final byte [] rn) { + if (WritableComparator.compareBytes(tn.getName(), 0, tn.getName().length, + rn, 0, tn.getName().length) != 0) { LOG.error("Region " + Bytes.toStringBinary(rn) + " does not belong to table " + - Bytes.toString(tn)); + tn); return true; } return false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java index 6beed92..e00528d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java @@ -95,7 +95,7 @@ public abstract class ModifyRegionUtils { if (newRegions == null) return null; int regionNumber = newRegions.length; ThreadPoolExecutor regionOpenAndInitThreadPool = getRegionOpenAndInitThreadPool(conf, - "RegionOpenAndInitThread-" + hTableDescriptor.getNameAsString(), regionNumber); + "RegionOpenAndInitThread-" + hTableDescriptor.getTableName(), regionNumber); CompletionService completionService = new ExecutorCompletionService( regionOpenAndInitThreadPool); List regionInfos = new ArrayList(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index 6df4790..b01d562 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -47,11 +47,11 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; @@ -374,7 +374,7 @@ public class RegionSplitter { LOG.debug("Creating table " + tableName + " with " + columnFamilies.length + " column families. 
Presplitting to " + splitCount + " regions"); - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); for (String cf : columnFamilies) { desc.addFamily(new HColumnDescriptor(Bytes.toBytes(cf))); } @@ -410,7 +410,7 @@ public class RegionSplitter { Math.max(table.getConnection().getCurrentNrHRS() / 2, minOS); Path hbDir = FSUtils.getRootDir(conf); - Path tableDir = HTableDescriptor.getTableDir(hbDir, table.getTableName()); + Path tableDir = FSUtils.getTableDir(hbDir, table.getName()); Path splitFile = new Path(tableDir, "_balancedSplit"); FileSystem fs = FileSystem.get(conf); @@ -640,7 +640,7 @@ public class RegionSplitter { // get table info Path rootDir = FSUtils.getRootDir(table.getConfiguration()); - Path tableDir = HTableDescriptor.getTableDir(rootDir, table.getTableName()); + Path tableDir = FSUtils.getTableDir(rootDir, table.getName()); FileSystem fs = tableDir.getFileSystem(table.getConfiguration()); HTableDescriptor htd = table.getTableDescriptor(); @@ -684,7 +684,7 @@ public class RegionSplitter { // check every Column Family for that region boolean refFound = false; for (HColumnDescriptor c : htd.getFamilies()) { - if ((refFound = regionFs.hasReferences(htd.getNameAsString()))) { + if ((refFound = regionFs.hasReferences(htd.getTableName().getNameAsString()))) { break; } } @@ -716,7 +716,7 @@ public class RegionSplitter { static LinkedList> getSplits(HTable table, SplitAlgorithm splitAlgo) throws IOException { Path hbDir = FSUtils.getRootDir(table.getConfiguration()); - Path tableDir = HTableDescriptor.getTableDir(hbDir, table.getTableName()); + Path tableDir = FSUtils.getTableDir(hbDir, table.getName()); Path splitFile = new Path(tableDir, "_balancedSplit"); FileSystem fs = tableDir.getFileSystem(table.getConfiguration()); diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp index cce21f9..4a43335 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp @@ -36,7 +36,8 @@ import="org.apache.hadoop.util.StringUtils" import="java.util.List" import="java.util.Map" - import="org.apache.hadoop.hbase.HConstants"%><% + import="org.apache.hadoop.hbase.HConstants"%> +<%@ page import="org.apache.hadoop.hbase.TableName" %><% HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); Configuration conf = master.getConfiguration(); HBaseAdmin hbadmin = new HBaseAdmin(conf); @@ -44,10 +45,12 @@ String snapshotName = request.getParameter("name"); SnapshotDescription snapshot = null; SnapshotInfo.SnapshotStats stats = null; + TableName snapshotTable = null; for (SnapshotDescription snapshotDesc: hbadmin.listSnapshots()) { if (snapshotName.equals(snapshotDesc.getName())) { snapshot = snapshotDesc; stats = SnapshotInfo.getSnapshotStats(conf, snapshot); + snapshotTable = TableName.valueOf(snapshot.getTable()); break; } } @@ -162,7 +165,8 @@

- + diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 6bbf478..36bc20e 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -34,12 +34,14 @@ import="org.apache.hadoop.hbase.master.HMaster" import="org.apache.hadoop.hbase.util.Bytes" import="org.apache.hadoop.hbase.util.FSUtils" - import="org.apache.hadoop.hbase.protobuf.ProtobufUtil"%><% + import="org.apache.hadoop.hbase.protobuf.ProtobufUtil"%> +<%@ page import="org.apache.hadoop.hbase.TableName" %> +<% HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); Configuration conf = master.getConfiguration(); HBaseAdmin hbadmin = new HBaseAdmin(conf); - String tableName = request.getParameter("name"); - HTable table = new HTable(conf, tableName); + String fqtn = request.getParameter("name"); + HTable table = new HTable(conf, fqtn); String tableHeader = "

Table Regions

State
<%= snapshot.getTable() %> + <%= snapshotTable.getNameAsString() %> <%= new Date(snapshot.getCreationTime()) %> <%= snapshot.getType() %> <%= snapshot.getVersion() %>
"; ServerName rl = master.getCatalogTracker().getMetaLocation(); boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); @@ -118,7 +120,7 @@ if (key != null && key.length() > 0) { hbadmin.split(key); } else { - hbadmin.split(tableName); + hbadmin.split(fqtn); } %> Split request accepted. <% @@ -126,7 +128,7 @@ if (key != null && key.length() > 0) { hbadmin.compact(key); } else { - hbadmin.compact(tableName); + hbadmin.compact(fqtn); } %> Compact request accepted. <% } @@ -141,7 +143,7 @@ %> - Table: <%= tableName %> + Table: <%= fqtn %> @@ -182,13 +184,13 @@
<% - if(tableName.equals(Bytes.toString(HConstants.META_TABLE_NAME))) { + if(fqtn.equals(TableName.META_TABLE_NAME.getNameAsString())) { %> <%= tableHeader %> <% @@ -229,7 +231,7 @@ <% if (showFragmentation) { %>
- + <% } %> @@ -314,7 +316,7 @@ Actions: - + @@ -328,7 +330,7 @@ Actions: - + diff --git a/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp b/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp index 7bc5875..6389a2f 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp @@ -90,7 +90,7 @@ <% for(HTableDescriptor htDesc : tables ) { %> - + <% } %> diff --git a/hbase-server/src/main/ruby/hbase/admin.rb b/hbase-server/src/main/ruby/hbase/admin.rb index 30ae110..f78c2b3 100644 --- a/hbase-server/src/main/ruby/hbase/admin.rb +++ b/hbase-server/src/main/ruby/hbase/admin.rb @@ -42,7 +42,7 @@ module Hbase #---------------------------------------------------------------------------------------------- # Returns a list of tables in hbase def list(regex = ".*") - @admin.listTables(regex).map { |t| t.getNameAsString } + @admin.listTables(regex).map { |t| t.getTableName().getNameAsString } end #---------------------------------------------------------------------------------------------- @@ -152,7 +152,7 @@ module Hbase # Disables all tables matching the given regex def disable_all(regex) regex = regex.to_s - @admin.disableTables(regex).map { |t| t.getNameAsString } + @admin.disableTables(regex).map { |t| t.getTableName().getNameAsString } end #--------------------------------------------------------------------------------------------- @@ -180,7 +180,7 @@ module Hbase # Drops a table def drop_all(regex) regex = regex.to_s - failed = @admin.deleteTables(regex).map { |t| t.getNameAsString } + failed = @admin.deleteTables(regex).map { |t| t.getTableName().getNameAsString } return failed end @@ -201,7 +201,7 @@ module Hbase has_columns = false # Start defining the table - htd = org.apache.hadoop.hbase.HTableDescriptor.new(table_name) + htd = org.apache.hadoop.hbase.HTableDescriptor.new(org.apache.hadoop.hbase.TableName.valueOf(table_name)) splits = nil # Args are either columns or splits, add them to the table definition # TODO: add table options support @@ -368,7 +368,7 @@ module Hbase status = Pair.new() begin - status = @admin.getAlterStatus(table_name.to_java_bytes) + status = @admin.getAlterStatus(org.apache.hadoop.hbase.TableName.valueOf(table_name)) if status.getSecond() != 0 puts "#{status.getSecond() - status.getFirst()}/#{status.getSecond()} regions updated." else @@ -650,7 +650,8 @@ module Hbase # Enables/disables a region by name def online(region_name, on_off) # Open meta table - meta = org.apache.hadoop.hbase.client.HTable.new(org.apache.hadoop.hbase.HConstants::META_TABLE_NAME) + meta = org.apache.hadoop.hbase.client.HTable.new( + org.apache.hadoop.hbase.TableName::META_TABLE_NAME) # Read region info # FIXME: fail gracefully if can't find the region @@ -722,5 +723,101 @@ module Hbase end end + #---------------------------------------------------------------------------------------------- + # Returns namespace's structure description + def describe_namespace(namespace_name) + namespace = @admin.getNamespaceDescriptor(namespace_name) + + unless namespace.nil? 
+ return namespace.to_s + end + + raise(ArgumentError, "Failed to find namespace named #{namespace_name}") + end + + #---------------------------------------------------------------------------------------------- + # Returns a list of namespaces in hbase + def list_namespace + @admin.listNamespaceDescriptors.map { |ns| ns.getName } + end + + #---------------------------------------------------------------------------------------------- + # Returns a list of tables in namespace + def list_namespace_tables(namespace_name) + unless namespace_name.nil? + return @admin.getTableDescriptorsByNamespace(namespace_name).map { |t| t.getTableName().getNameAsString } + end + + raise(ArgumentError, "Failed to find namespace named #{namespace_name}") + end + + #---------------------------------------------------------------------------------------------- + # Creates a namespace + def create_namespace(namespace_name, *args) + # Fail if table name is not a string + raise(ArgumentError, "Namespace name must be of type String") unless namespace_name.kind_of?(String) + + # Flatten params array + args = args.flatten.compact + + # Start defining the table + nsb = org.apache.hadoop.hbase.NamespaceDescriptor::create(namespace_name) + args.each do |arg| + unless arg.kind_of?(Hash) + raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash or String type") + end + for k,v in arg + v = v.to_s unless v.nil? + nsb.addProperty(k, v) + end + end + @admin.createNamespace(nsb.build()); + end + + #---------------------------------------------------------------------------------------------- + # modify a namespace + def alter_namespace(namespace_name, *args) + # Fail if table name is not a string + raise(ArgumentError, "Namespace name must be of type String") unless namespace_name.kind_of?(String) + + nsd = @admin.getNamespaceDescriptor(namespace_name) + + unless nsd + raise(ArgumentError, "Namespace does not exist") + end + nsb = org.apache.hadoop.hbase.NamespaceDescriptor::create(nsd) + + # Flatten params array + args = args.flatten.compact + + # Start defining the table + args.each do |arg| + unless arg.kind_of?(Hash) + raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash type") + end + method = arg[METHOD] + if method == "unset" + nsb.removeConfiguration(arg[NAME]) + elsif method == "set" + arg.delete(METHOD) + for k,v in arg + v = v.to_s unless v.nil? + + nsb.addConfiguration(k, v) + end + else + raise(ArgumentError, "Unknown method #{method}") + end + end + @admin.modifyNamespace(nsb.build()); + end + + + #---------------------------------------------------------------------------------------------- + # Drops a table + def drop_namespace(namespace_name) + @admin.deleteNamespace(namespace_name) + end + end end diff --git a/hbase-server/src/main/ruby/hbase/table.rb b/hbase-server/src/main/ruby/hbase/table.rb index 2acd94e..a2f3f03 100644 --- a/hbase-server/src/main/ruby/hbase/table.rb +++ b/hbase-server/src/main/ruby/hbase/table.rb @@ -466,7 +466,8 @@ EOF # Checks if current table is one of the 'meta' tables def is_meta_table? 
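The new shell helpers above are thin wrappers over the Java admin API; a hedged sketch of the equivalent calls (the namespace name and property are made up for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    final class NamespaceAdminSketch {
      static void roundTrip(Configuration conf) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(conf);
        try {
          NamespaceDescriptor ns = NamespaceDescriptor.create("ns1")
              .addProperty("PROPERTY_NAME", "PROPERTY_VALUE")
              .build();
          admin.createNamespace(ns);                        // namespace_create 'ns1', {...}
          for (NamespaceDescriptor d : admin.listNamespaceDescriptors()) {
            System.out.println(d.getName());                // namespace_list
          }
          admin.deleteNamespace("ns1");                     // namespace_drop; the namespace must be empty
        } finally {
          admin.close();
        }
      }
    }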
tn = @table.table_name - org.apache.hadoop.hbase.util.Bytes.equals(tn, org.apache.hadoop.hbase.HConstants::META_TABLE_NAME) + org.apache.hadoop.hbase.util.Bytes.equals(tn, + org.apache.hadoop.hbase.TableName::META_TABLE_NAME.getName) end # Returns family and (when has it) qualifier for a column name diff --git a/hbase-server/src/main/ruby/shell.rb b/hbase-server/src/main/ruby/shell.rb index b1d5cc0..fb70b49 100644 --- a/hbase-server/src/main/ruby/shell.rb +++ b/hbase-server/src/main/ruby/shell.rb @@ -254,6 +254,19 @@ Shell.load_command_group( ) Shell.load_command_group( + 'namespace', + :full_name => 'NAMESPACE MANAGEMENT COMMANDS', + :commands => %w[ + namespace_create + namespace_drop + namespace_alter + namespace_describe + namespace_list + namespace_list_tables + ] +) + +Shell.load_command_group( 'dml', :full_name => 'DATA MANIPULATION COMMANDS', :commands => %w[ diff --git a/hbase-server/src/main/ruby/shell/commands.rb b/hbase-server/src/main/ruby/shell/commands.rb index 6dc35a1..dc92340 100644 --- a/hbase-server/src/main/ruby/shell/commands.rb +++ b/hbase-server/src/main/ruby/shell/commands.rb @@ -81,12 +81,12 @@ module Shell def translate_hbase_exceptions(*args) yield - rescue org.apache.hadoop.hbase.exceptions.TableNotFoundException + rescue org.apache.hadoop.hbase.TableNotFoundException raise "Unknown table #{args.first}!" - rescue org.apache.hadoop.hbase.exceptions.NoSuchColumnFamilyException + rescue org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException valid_cols = table(args.first).get_all_columns.map { |c| c + '*' } raise "Unknown column family! Valid column names: #{valid_cols.join(", ")}" - rescue org.apache.hadoop.hbase.exceptions.TableExistsException => e + rescue org.apache.hadoop.hbase.TableExistsException => e raise "Table already exists: #{e.message}!" end end diff --git a/hbase-server/src/main/ruby/shell/commands/namespace_alter.rb b/hbase-server/src/main/ruby/shell/commands/namespace_alter.rb new file mode 100644 index 0000000..a25c740 --- /dev/null +++ b/hbase-server/src/main/ruby/shell/commands/namespace_alter.rb @@ -0,0 +1,44 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class NamespaceAlter < Command + def help + return <<-EOF +Alter namespace properties. 
+ +To add/modify a property: + + hbase> namespace_alter 'ns1', {METHOD => 'set', 'PROERTY_NAME' => 'PROPERTY_VALUE'} + +To delete a property: + + hbase> namespace_alter 'ns1', {METHOD => 'unset', NAME=>'PROERTY_NAME'} +EOF + end + + def command(namespace, *args) + format_simple_command do + admin.alter_namespace(namespace, *args) + end + end + end + end +end diff --git a/hbase-server/src/main/ruby/shell/commands/namespace_create.rb b/hbase-server/src/main/ruby/shell/commands/namespace_create.rb new file mode 100644 index 0000000..16e483d --- /dev/null +++ b/hbase-server/src/main/ruby/shell/commands/namespace_create.rb @@ -0,0 +1,41 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class NamespaceCreate < Command + def help + return <<-EOF +Create namespace; pass namespace name, +and optionally a dictionary of namespace configuration. +Examples: + + hbase> namespace_create 'ns1' + hbase> namespace_create 'ns1', {'PROERTY_NAME'=>'PROPERTY_VALUE'} +EOF + end + + def command(namespace, *args) + format_simple_command do + admin.create_namespace(namespace, *args) + end + end + end + end +end diff --git a/hbase-server/src/main/ruby/shell/commands/namespace_describe.rb b/hbase-server/src/main/ruby/shell/commands/namespace_describe.rb new file mode 100644 index 0000000..d19476d --- /dev/null +++ b/hbase-server/src/main/ruby/shell/commands/namespace_describe.rb @@ -0,0 +1,41 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class NamespaceDescribe < Command + def help + return <<-EOF +Describe the named namespace. 
For example: + hbase> namespace_describe 'ns1' +EOF + end + + def command(namespace) + now = Time.now + + desc = admin.describe_namespace(namespace) + + formatter.header([ "DESCRIPTION" ], [ 64 ]) + formatter.row([ desc ], true, [ 64 ]) + formatter.footer(now) + end + end + end +end diff --git a/hbase-server/src/main/ruby/shell/commands/namespace_drop.rb b/hbase-server/src/main/ruby/shell/commands/namespace_drop.rb new file mode 100644 index 0000000..3c9af39 --- /dev/null +++ b/hbase-server/src/main/ruby/shell/commands/namespace_drop.rb @@ -0,0 +1,36 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class NamespaceDrop < Command + def help + return <<-EOF +Drop the named namespace. The namespace must be empty. +EOF + end + + def command(namespace) + format_simple_command do + admin.drop_namespace(namespace) + end + end + end + end +end diff --git a/hbase-server/src/main/ruby/shell/commands/namespace_list.rb b/hbase-server/src/main/ruby/shell/commands/namespace_list.rb new file mode 100644 index 0000000..aa85e0c --- /dev/null +++ b/hbase-server/src/main/ruby/shell/commands/namespace_list.rb @@ -0,0 +1,47 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class NamespaceList < Command + def help + return <<-EOF +List all namespaces in hbase. Optional regular expression parameter could +be used to filter the output. 
Examples: + + hbase> namespace_list + hbase> namespace_list 'abc.*' +EOF + end + + def command(regex = ".*") + now = Time.now + formatter.header([ "NAMESPACE" ]) + + regex = /#{regex}/ unless regex.is_a?(Regexp) + list = admin.list_namespace.grep(regex) + list.each do |table| + formatter.row([ table ]) + end + + formatter.footer(now, list.size) + end + end + end +end diff --git a/hbase-server/src/main/ruby/shell/commands/namespace_list_tables.rb b/hbase-server/src/main/ruby/shell/commands/namespace_list_tables.rb new file mode 100644 index 0000000..6c78255 --- /dev/null +++ b/hbase-server/src/main/ruby/shell/commands/namespace_list_tables.rb @@ -0,0 +1,45 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class NamespaceListTables < Command + def help + return <<-EOF +List all tables that are members of the namespace. +Examples: + + hbase> namespace_list_tables 'ns1' +EOF + end + + def command(namespace) + now = Time.now + formatter.header([ "TABLE" ]) + + list = admin.list_namespace_tables(namespace) + list.each do |table| + formatter.row([ table ]) + end + + formatter.footer(now, list.size) + end + end + end +end diff --git a/hbase-server/src/test/data/TestNamespaceUpgrade.tgz b/hbase-server/src/test/data/TestNamespaceUpgrade.tgz new file mode 100644 index 0000000..f18430b Binary files /dev/null and b/hbase-server/src/test/data/TestNamespaceUpgrade.tgz differ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java index c69c97c..d92ca97 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java @@ -161,7 +161,7 @@ public abstract class HBaseTestCase extends TestCase { public HRegion createNewHRegion(HTableDescriptor desc, byte [] startKey, byte [] endKey, Configuration conf) throws IOException { - HRegionInfo hri = new HRegionInfo(desc.getName(), startKey, endKey); + HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey); return HRegion.createHRegion(hri, testDir, conf, desc); } @@ -202,7 +202,7 @@ public abstract class HBaseTestCase extends TestCase { */ protected HTableDescriptor createTableDescriptor(final String name, final int minVersions, final int versions, final int ttl, boolean keepDeleted) { - HTableDescriptor htd = new HTableDescriptor(name); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); for (byte[] cfName : new byte[][]{ fam1, fam2, fam3 }) { htd.addFamily(new HColumnDescriptor(cfName) .setMinVersions(minVersions) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index b3d0501..0fb9a96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -856,7 +856,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { this.hbaseCluster = new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass); // Don't leave here till we've done a successful scan of the .META. - HTable t = new HTable(c, HConstants.META_TABLE_NAME); + HTable t = new HTable(c, TableName.META_TABLE_NAME); ResultScanner s = t.getScanner(new Scan()); while (s.next() != null) { continue; @@ -878,7 +878,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { public void restartHBaseCluster(int servers) throws IOException, InterruptedException { this.hbaseCluster = new MiniHBaseCluster(this.conf, servers); // Don't leave here till we've done a successful scan of the .META. - HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME); + HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME); ResultScanner s = t.getScanner(new Scan()); while (s.next() != null) { // do nothing @@ -985,7 +985,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * Flushes all caches in the mini hbase cluster * @throws IOException */ - public void flush(byte [] tableName) throws IOException { + public void flush(TableName tableName) throws IOException { getMiniHBaseCluster().flushcache(tableName); } @@ -1001,7 +1001,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * Compact all of a table's reagion in the mini hbase cluster * @throws IOException */ - public void compact(byte [] tableName, boolean major) throws IOException { + public void compact(TableName tableName, boolean major) throws IOException { getMiniHBaseCluster().compact(tableName, major); } @@ -1014,7 +1014,19 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public HTable createTable(String tableName, String family) throws IOException{ - return createTable(tableName, new String[] { family }); + return createTable(TableName.valueOf(tableName), new String[]{family}); + } + + /** + * Create a table. + * @param tableName + * @param family + * @return An HTable instance for the created table. + * @throws IOException + */ + public HTable createTable(byte[] tableName, byte[] family) + throws IOException{ + return createTable(TableName.valueOf(tableName), new byte[][]{family}); } /** @@ -1026,11 +1038,23 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public HTable createTable(String tableName, String[] families) throws IOException { + return createTable(tableName, families); + } + + /** + * Create a table. + * @param tableName + * @param families + * @return An HTable instance for the created table. + * @throws IOException + */ + public HTable createTable(TableName tableName, String[] families) + throws IOException { List fams = new ArrayList(families.length); for (String family : families) { fams.add(Bytes.toBytes(family)); } - return createTable(Bytes.toBytes(tableName), fams.toArray(new byte[0][])); + return createTable(tableName, fams.toArray(new byte[0][])); } /** @@ -1040,11 +1064,12 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. 
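For tests, the TableName-based overloads read as follows; a small sketch that assumes a running mini cluster and made-up table and family names:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CreateTableSketch {
      static HTable create(HBaseTestingUtility util) throws Exception {
        TableName tableName = TableName.valueOf("test_table");  // hypothetical
        byte[][] families = { Bytes.toBytes("f1"), Bytes.toBytes("f2") };
        HTable table = util.createTable(tableName, families, 3);  // keep 3 versions
        util.flush(tableName);  // flush() now takes a TableName as well
        return table;
      }
    }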
* @throws IOException */ - public HTable createTable(byte[] tableName, byte[] family) + public HTable createTable(TableName tableName, byte[] family) throws IOException{ return createTable(tableName, new byte[][]{family}); } + /** * Create a table. * @param tableName @@ -1058,7 +1083,32 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { new Configuration(getConfiguration())); } + /** + * Create a table. + * @param tableName + * @param families + * @return An HTable instance for the created table. + * @throws IOException + */ + public HTable createTable(TableName tableName, byte[][] families) + throws IOException { + return createTable(tableName, families, + new Configuration(getConfiguration())); + } + public HTable createTable(byte[] tableName, byte[][] families, + int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException { + return createTable(TableName.valueOf(tableName), families, numVersions, + startKey, endKey, numRegions); + } + + public HTable createTable(String tableName, byte[][] families, + int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException { + return createTable(TableName.valueOf(tableName), families, numVersions, + startKey, endKey, numRegions); + } + + public HTable createTable(TableName tableName, byte[][] families, int numVersions, byte[] startKey, byte[] endKey, int numRegions) throws IOException{ HTableDescriptor desc = new HTableDescriptor(tableName); @@ -1081,7 +1131,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createTable(byte[] tableName, byte[][] families, + public HTable createTable(TableName tableName, byte[][] families, final Configuration c) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); @@ -1104,11 +1154,35 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @param tableName * @param families * @param c Configuration to use - * @param numVersions * @return An HTable instance for the created table. * @throws IOException */ public HTable createTable(byte[] tableName, byte[][] families, + final Configuration c) + throws IOException { + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); + for(byte[] family : families) { + HColumnDescriptor hcd = new HColumnDescriptor(family); + // Disable blooms (they are on by default as of 0.95) but we disable them here because + // tests have hard coded counts of what to expect in block cache, etc., and blooms being + // on is interfering. + hcd.setBloomFilterType(BloomType.NONE); + desc.addFamily(hcd); + } + getHBaseAdmin().createTable(desc); + return new HTable(c, tableName); + } + + /** + * Create a table. + * @param tableName + * @param families + * @param c Configuration to use + * @param numVersions + * @return An HTable instance for the created table. + * @throws IOException + */ + public HTable createTable(TableName tableName, byte[][] families, final Configuration c, int numVersions) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); @@ -1126,6 +1200,28 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Create a table. * @param tableName + * @param families + * @param c Configuration to use + * @param numVersions + * @return An HTable instance for the created table. 
+ * @throws IOException + */ + public HTable createTable(byte[] tableName, byte[][] families, + final Configuration c, int numVersions) + throws IOException { + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); + for(byte[] family : families) { + HColumnDescriptor hcd = new HColumnDescriptor(family) + .setMaxVersions(numVersions); + desc.addFamily(hcd); + } + getHBaseAdmin().createTable(desc); + return new HTable(c, tableName); + } + + /** + * Create a table. + * @param tableName * @param family * @param numVersions * @return An HTable instance for the created table. @@ -1139,6 +1235,19 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Create a table. * @param tableName + * @param family + * @param numVersions + * @return An HTable instance for the created table. + * @throws IOException + */ + public HTable createTable(TableName tableName, byte[] family, int numVersions) + throws IOException { + return createTable(tableName, new byte[][]{family}, numVersions); + } + + /** + * Create a table. + * @param tableName * @param families * @param numVersions * @return An HTable instance for the created table. @@ -1147,6 +1256,20 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { public HTable createTable(byte[] tableName, byte[][] families, int numVersions) throws IOException { + return createTable(TableName.valueOf(tableName), families, numVersions); + } + + /** + * Create a table. + * @param tableName + * @param families + * @param numVersions + * @return An HTable instance for the created table. + * @throws IOException + */ + public HTable createTable(TableName tableName, byte[][] families, + int numVersions) + throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions); @@ -1168,6 +1291,20 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public HTable createTable(byte[] tableName, byte[][] families, int numVersions, int blockSize) throws IOException { + return createTable(TableName.valueOf(tableName), + families, numVersions, blockSize); + } + + /** + * Create a table. + * @param tableName + * @param families + * @param numVersions + * @return An HTable instance for the created table. + * @throws IOException + */ + public HTable createTable(TableName tableName, byte[][] families, + int numVersions, int blockSize) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family) @@ -1192,6 +1329,20 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { public HTable createTable(byte[] tableName, byte[][] families, int[] numVersions) throws IOException { + return createTable(TableName.valueOf(tableName), families, numVersions); + } + + /** + * Create a table. + * @param tableName + * @param families + * @param numVersions + * @return An HTable instance for the created table. 
+ * @throws IOException + */ + public HTable createTable(TableName tableName, byte[][] families, + int[] numVersions) + throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); int i = 0; for (byte[] family : families) { @@ -1215,6 +1366,19 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @throws IOException */ public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows) + throws IOException{ + return createTable(TableName.valueOf(tableName), family, splitRows); + } + + /** + * Create a table. + * @param tableName + * @param family + * @param splitRows + * @return An HTable instance for the created table. + * @throws IOException + */ + public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); HColumnDescriptor hcd = new HColumnDescriptor(family); @@ -1235,14 +1399,14 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows) throws IOException { - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); for(byte[] family:families) { HColumnDescriptor hcd = new HColumnDescriptor(family); desc.addFamily(hcd); } getHBaseAdmin().createTable(desc, splitRows); // HBaseAdmin only waits for regions to appear in META we should wait until they are assigned - waitUntilAllRegionsAssigned(tableName); + waitUntilAllRegionsAssigned(TableName.valueOf(tableName)); return new HTable(getConfiguration(), tableName); } @@ -1251,7 +1415,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @param tableName existing table */ public void deleteTable(String tableName) throws IOException { - deleteTable(Bytes.toBytes(tableName)); + deleteTable(TableName.valueOf(tableName)); } /** @@ -1259,21 +1423,40 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @param tableName existing table */ public void deleteTable(byte[] tableName) throws IOException { + deleteTable(TableName.valueOf(tableName)); + } + + /** + * Drop an existing table + * @param tableName existing table + */ + public void deleteTable(TableName tableName) throws IOException { try { getHBaseAdmin().disableTable(tableName); } catch (TableNotEnabledException e) { - LOG.debug("Table: " + Bytes.toString(tableName) + " already disabled, so just deleting it."); + LOG.debug("Table: " + tableName + " already disabled, so just deleting it."); } getHBaseAdmin().deleteTable(tableName); } + /** * Provide an existing table name to truncate * @param tableName existing table * @return HTable to that new table * @throws IOException */ - public HTable truncateTable(byte [] tableName) throws IOException { + public HTable truncateTable(byte[] tableName) throws IOException { + return truncateTable(TableName.valueOf(tableName)); + } + + /** + * Provide an existing table name to truncate + * @param tableName existing table + * @return HTable to that new table + * @throws IOException + */ + public HTable truncateTable(TableName tableName) throws IOException { HTable table = new HTable(getConfiguration(), tableName); Scan scan = new Scan(); ResultScanner resScan = table.getScanner(scan); @@ -1510,7 +1693,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { final byte[] columnFamily, byte [][] startKeys) throws IOException { Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR); - 
HTable meta = new HTable(c, HConstants.META_TABLE_NAME); + HTable meta = new HTable(c, TableName.META_TABLE_NAME); HTableDescriptor htd = table.getTableDescriptor(); if(!htd.hasFamily(columnFamily)) { HColumnDescriptor hcd = new HColumnDescriptor(columnFamily); @@ -1520,7 +1703,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { // setup already has the ",,123456789" row with an empty start // and end key. Adding the custom regions below adds those blindly, // including the new start region from empty to "bbb". lg - List rows = getMetaTableRows(htd.getName()); + List rows = getMetaTableRows(htd.getTableName()); String regionToDeleteInFS = table .getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0) .getRegionInfo().getEncodedName(); @@ -1529,7 +1712,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { int count = 0; for (int i = 0; i < startKeys.length; i++) { int j = (i + 1) % startKeys.length; - HRegionInfo hri = new HRegionInfo(table.getTableName(), + HRegionInfo hri = new HRegionInfo(table.getName(), startKeys[i], startKeys[j]); MetaEditor.addRegionToMeta(meta, hri); newRegions.add(hri); @@ -1543,7 +1726,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } // remove the "old" region from FS Path tableDir = new Path(getDefaultRootDirPath().toString() - + System.getProperty("file.separator") + htd.getNameAsString() + + System.getProperty("file.separator") + htd.getTableName() + System.getProperty("file.separator") + regionToDeleteInFS); FileSystem.get(c).delete(tableDir); // flush cache of regions @@ -1575,13 +1758,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { public List createMultiRegionsInMeta(final Configuration conf, final HTableDescriptor htd, byte [][] startKeys) throws IOException { - HTable meta = new HTable(conf, HConstants.META_TABLE_NAME); + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR); List newRegions = new ArrayList(startKeys.length); // add custom ones for (int i = 0; i < startKeys.length; i++) { int j = (i + 1) % startKeys.length; - HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i], + HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKeys[i], startKeys[j]); MetaEditor.addRegionToMeta(meta, hri); newRegions.add(hri); @@ -1598,7 +1781,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public List getMetaTableRows() throws IOException { // TODO: Redo using MetaReader class - HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME); + HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME); List rows = new ArrayList(); ResultScanner s = t.getScanner(new Scan()); for (Result result : s) { @@ -1616,9 +1799,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * * @throws IOException When reading the rows fails. */ - public List getMetaTableRows(byte[] tableName) throws IOException { + public List getMetaTableRows(TableName tableName) throws IOException { // TODO: Redo using MetaReader. 
- HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME); + HTable t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME); List rows = new ArrayList(); ResultScanner s = t.getScanner(new Scan()); for (Result result : s) { @@ -1629,7 +1812,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { continue; } - if (Bytes.compareTo(info.getTableName(), tableName) == 0) { + if (info.getTableName().equals(tableName)) { LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow()) + info); rows.add(result.getRow()); @@ -1652,13 +1835,27 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @throws InterruptedException */ public HRegionServer getRSForFirstRegionInTable(byte[] tableName) - throws IOException, InterruptedException { + throws IOException, InterruptedException { + return getRSForFirstRegionInTable(TableName.valueOf(tableName)); + } + /** + * Tool to get the reference to the region server object that holds the + * region of the specified user table. + * It first searches for the meta rows that contain the region of the + * specified table, then gets the index of that RS, and finally retrieves + * the RS's reference. + * @param tableName user table to lookup in .META. + * @return region server that holds it, null if the row doesn't exist + * @throws IOException + */ + public HRegionServer getRSForFirstRegionInTable(TableName tableName) + throws IOException, InterruptedException { List metaRows = getMetaTableRows(tableName); if (metaRows == null || metaRows.isEmpty()) { return null; } LOG.debug("Found " + metaRows.size() + " rows for table " + - Bytes.toString(tableName)); + tableName); byte [] firstrow = metaRows.get(0); LOG.debug("FirstRow=" + Bytes.toString(firstrow)); long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE, @@ -1903,7 +2100,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { monitor.close(); if (checkStatus) { - new HTable(new Configuration(conf), HConstants.META_TABLE_NAME).close(); + new HTable(new Configuration(conf), TableName.META_TABLE_NAME).close(); } } @@ -2025,7 +2222,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @param maxAttempts maximum number of attempts, unlimited for value of -1 * @return the HRegion chosen, null if none was found within limit of maxAttempts */ - public HRegion getSplittableRegion(byte[] tableName, int maxAttempts) { + public HRegion getSplittableRegion(TableName tableName, int maxAttempts) { List regions = getHBaseCluster().getRegions(tableName); int regCount = regions.size(); Set attempted = new HashSet(); @@ -2173,7 +2370,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { // Below we do a get. The get will retry if a NotServeringRegionException or a // RegionOpeningException. It is crass but when done all will be online. 
try { - Canary.sniff(admin, Bytes.toString(table)); + Canary.sniff(admin, TableName.valueOf(table)); } catch (Exception e) { throw new IOException(e); } @@ -2297,7 +2494,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @param tableName the table name * @throws IOException */ - public void waitUntilAllRegionsAssigned(final byte[] tableName) throws IOException { + public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException { waitUntilAllRegionsAssigned(tableName, 60000); } @@ -2310,9 +2507,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @param timeout timeout, in milliseconds * @throws IOException */ - public void waitUntilAllRegionsAssigned(final byte[] tableName, final long timeout) + public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout) throws IOException { - final HTable meta = new HTable(getConfiguration(), HConstants.META_TABLE_NAME); + final HTable meta = new HTable(getConfiguration(), TableName.META_TABLE_NAME); try { waitFor(timeout, 200, true, new Predicate() { @Override @@ -2326,7 +2523,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { while ((r = s.next()) != null) { byte [] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); HRegionInfo info = HRegionInfo.parseFromOrNull(b); - if (info != null && Bytes.equals(info.getTableName(), tableName)) { + if (info != null && info.getTableName().equals(tableName)) { b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); allRegionsAssigned &= (b != null); } @@ -2509,8 +2706,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { final Random rand = new Random(tableName.hashCode() * 17L + 12938197137L); final int numCF = families.size(); final byte[][] cfBytes = new byte[numCF][]; - final byte[] tableNameBytes = Bytes.toBytes(tableName); - { int cfIndex = 0; for (String cf : families) { @@ -2524,14 +2719,14 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { final int splitStartKey = actualStartKey + keysPerRegion; final int splitEndKey = actualEndKey - keysPerRegion; final String keyFormat = "%08x"; - final HTable table = createTable(tableNameBytes, cfBytes, + final HTable table = createTable(tableName, cfBytes, maxVersions, Bytes.toBytes(String.format(keyFormat, splitStartKey)), Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions); if (hbaseCluster != null) { - getMiniHBaseCluster().flushcache(HConstants.META_TABLE_NAME); + getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME); } for (int iFlush = 0; iFlush < numFlushes; ++iFlush) { @@ -2568,7 +2763,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { LOG.info("Initiating flush #" + iFlush + " for table " + tableName); table.flushCommits(); if (hbaseCluster != null) { - getMiniHBaseCluster().flushcache(tableNameBytes); + getMiniHBaseCluster().flushcache(table.getName()); } } @@ -2650,7 +2845,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return the number of regions the table was split into */ public static int createPreSplitLoadTestTable(Configuration conf, - byte[] tableName, byte[] columnFamily, Algorithm compression, + TableName tableName, byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding) throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); HColumnDescriptor hcd = new HColumnDescriptor(columnFamily); @@ -2695,7 +2890,7 @@ public class 
HBaseTestingUtility extends HBaseCommonTestingUtility { LOG.error("Master not running", e); throw new IOException(e); } catch (TableExistsException e) { - LOG.warn("Table " + Bytes.toStringBinary(desc.getName()) + + LOG.warn("Table " + desc.getTableName() + " already exists, continuing"); } finally { admin.close(); @@ -2704,7 +2899,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } public static int getMetaRSPort(Configuration conf) throws IOException { - HTable table = new HTable(conf, HConstants.META_TABLE_NAME); + HTable table = new HTable(conf, TableName.META_TABLE_NAME); HRegionLocation hloc = table.getRegionLocation(Bytes.toBytes("")); table.close(); return hloc.getPort(); @@ -2733,10 +2928,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { public HRegion createTestRegion(String tableName, HColumnDescriptor hcd) throws IOException { - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); htd.addFamily(hcd); HRegionInfo info = - new HRegionInfo(Bytes.toBytes(tableName), null, null, false); + new HRegionInfo(TableName.valueOf(tableName), null, null, false); HRegion region = HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd); return region; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java index 4bcca41..e5334bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HTestConst.java @@ -34,6 +34,8 @@ public class HTestConst { public static final String DEFAULT_TABLE_STR = "MyTestTable"; public static final byte[] DEFAULT_TABLE_BYTES = Bytes.toBytes(DEFAULT_TABLE_STR); + public static final TableName DEFAULT_TABLE = + TableName.valueOf(DEFAULT_TABLE_BYTES); public static final String DEFAULT_CF_STR = "MyDefaultCF"; public static final byte[] DEFAULT_CF_BYTES = Bytes.toBytes(DEFAULT_CF_STR); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index 6fa9df0..11a02b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.test.MetricsAssertHelper; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; @@ -537,11 +536,11 @@ public class MiniHBaseCluster extends HBaseCluster { * Call flushCache on all regions of the specified table. 
* @throws IOException */ - public void flushcache(byte [] tableName) throws IOException { + public void flushcache(TableName tableName) throws IOException { for (JVMClusterUtil.RegionServerThread t: this.hbaseCluster.getRegionServers()) { for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) { - if(Bytes.equals(r.getTableDesc().getName(), tableName)) { + if(r.getTableDesc().getTableName().equals(tableName)) { r.flushcache(); } } @@ -565,11 +564,11 @@ public class MiniHBaseCluster extends HBaseCluster { * Call flushCache on all regions of the specified table. * @throws IOException */ - public void compact(byte [] tableName, boolean major) throws IOException { + public void compact(TableName tableName, boolean major) throws IOException { for (JVMClusterUtil.RegionServerThread t: this.hbaseCluster.getRegionServers()) { for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) { - if(Bytes.equals(r.getTableDesc().getName(), tableName)) { + if(r.getTableDesc().getTableName().equals(tableName)) { r.compactStores(major); } } @@ -600,11 +599,15 @@ public class MiniHBaseCluster extends HBaseCluster { } public List getRegions(byte[] tableName) { + return getRegions(TableName.valueOf(tableName)); + } + + public List getRegions(TableName tableName) { List ret = new ArrayList(); for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) { HRegionServer hrs = rst.getRegionServer(); for (HRegion region : hrs.getOnlineRegionsLocalContext()) { - if (Bytes.equals(region.getTableDesc().getName(), tableName)) { + if (region.getTableDesc().getTableName().equals(tableName)) { ret.add(region); } } @@ -683,12 +686,12 @@ public class MiniHBaseCluster extends HBaseCluster { this.hbaseCluster.join(); } - public List findRegionsForTable(byte[] tableName) { + public List findRegionsForTable(TableName tableName) { ArrayList ret = new ArrayList(); for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) { HRegionServer hrs = rst.getRegionServer(); for (HRegion region : hrs.getOnlineRegions(tableName)) { - if (Bytes.equals(region.getTableDesc().getName(), tableName)) { + if (region.getTableDesc().getTableName().equals(tableName)) { ret.add(region); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index 8a453d4..09faef5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -31,8 +31,6 @@ import java.util.Map; import java.util.Random; import java.util.TreeMap; import java.util.Arrays; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.Semaphore; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.lang.reflect.Constructor; @@ -60,7 +58,6 @@ import org.apache.hadoop.hbase.filter.CompareFilter; import org.apache.hadoop.hbase.filter.BinaryComparator; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; @@ -113,7 +110,8 @@ public class PerformanceEvaluation extends Configured implements Tool { private static final int ROWS_PER_GB = ONE_GB / VALUE_LENGTH; public static final byte[] COMPRESSION = 
Bytes.toBytes("NONE"); - public static final byte[] TABLE_NAME = Bytes.toBytes("TestTable"); + public static final TableName TABLE_NAME = + TableName.valueOf("TestTable"); public static final byte[] FAMILY_NAME = Bytes.toBytes("info"); public static final byte[] QUALIFIER_NAME = Bytes.toBytes("data"); @@ -126,7 +124,7 @@ public class PerformanceEvaluation extends Configured implements Tool { private int rowPrefixLength = DEFAULT_ROW_PREFIX_LENGTH; private int N = 1; private int R = ROWS_PER_GB; - private byte[] tableName = TABLE_NAME; + private TableName tableName = TABLE_NAME; private Compression.Algorithm compression = Compression.Algorithm.NONE; private DataBlockEncoding blockEncoding = DataBlockEncoding.NONE; private boolean flushCommits = true; @@ -482,9 +480,9 @@ public class PerformanceEvaluation extends Configured implements Tool { HTableDescriptor tableDescriptor = getTableDescriptor(); if (this.presplitRegions > 0) { // presplit requested - if (admin.tableExists(tableDescriptor.getName())) { - admin.disableTable(tableDescriptor.getName()); - admin.deleteTable(tableDescriptor.getName()); + if (admin.tableExists(tableDescriptor.getTableName())) { + admin.disableTable(tableDescriptor.getTableName()); + admin.deleteTable(tableDescriptor.getTableName()); } byte[][] splits = getSplits(); @@ -495,13 +493,13 @@ public class PerformanceEvaluation extends Configured implements Tool { LOG.info ("Table created with " + this.presplitRegions + " splits"); } else { - boolean tableExists = admin.tableExists(tableDescriptor.getName()); + boolean tableExists = admin.tableExists(tableDescriptor.getTableName()); if (!tableExists) { admin.createTable(tableDescriptor); LOG.info("Table " + tableDescriptor + " created"); } } - boolean tableExists = admin.tableExists(tableDescriptor.getName()); + boolean tableExists = admin.tableExists(tableDescriptor.getTableName()); return tableExists; } @@ -563,7 +561,7 @@ public class PerformanceEvaluation extends Configured implements Tool { final List threads = new ArrayList(this.N); final long[] timings = new long[this.N]; final int perClientRows = R/N; - final byte[] tableName = this.tableName; + final TableName tableName = this.tableName; final DataBlockEncoding encoding = this.blockEncoding; final boolean flushCommits = this.flushCommits; final Compression.Algorithm compression = this.compression; @@ -746,7 +744,7 @@ public class PerformanceEvaluation extends Configured implements Tool { private int perClientRunRows; private int totalRows; private int numClientThreads; - private byte[] tableName; + private TableName tableName; private boolean flushCommits; private boolean writeToWAL = true; @@ -754,7 +752,7 @@ public class PerformanceEvaluation extends Configured implements Tool { } TestOptions(int startRow, int perClientRunRows, int totalRows, - int numClientThreads, byte[] tableName, + int numClientThreads, TableName tableName, boolean flushCommits, boolean writeToWAL) { this.startRow = startRow; this.perClientRunRows = perClientRunRows; @@ -781,7 +779,7 @@ public class PerformanceEvaluation extends Configured implements Tool { return numClientThreads; } - public byte[] getTableName() { + public TableName getTableName() { return tableName; } @@ -812,7 +810,7 @@ public class PerformanceEvaluation extends Configured implements Tool { protected final int perClientRunRows; protected final int totalRows; private final Status status; - protected byte[] tableName; + protected TableName tableName; protected HTable table; protected volatile Configuration conf; 
protected boolean flushCommits; @@ -1370,7 +1368,7 @@ public class PerformanceEvaluation extends Configured implements Tool { final String table = "--table="; if (cmd.startsWith(table)) { - this.tableName = Bytes.toBytes(cmd.substring(table.length())); + this.tableName = TableName.valueOf(cmd.substring(table.length())); continue; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCompare.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCompare.java index 5ec7dae..b0665d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCompare.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCompare.java @@ -33,25 +33,25 @@ public class TestCompare extends TestCase { * Sort of HRegionInfo. */ public void testHRegionInfo() { - HRegionInfo a = new HRegionInfo(Bytes.toBytes("a"), null, null); - HRegionInfo b = new HRegionInfo(Bytes.toBytes("b"), null, null); + HRegionInfo a = new HRegionInfo(TableName.valueOf("a"), null, null); + HRegionInfo b = new HRegionInfo(TableName.valueOf("b"), null, null); assertTrue(a.compareTo(b) != 0); - HTableDescriptor t = new HTableDescriptor("t"); + HTableDescriptor t = new HTableDescriptor(TableName.valueOf("t")); byte [] midway = Bytes.toBytes("midway"); - a = new HRegionInfo(t.getName(), null, midway); - b = new HRegionInfo(t.getName(), midway, null); + a = new HRegionInfo(t.getTableName(), null, midway); + b = new HRegionInfo(t.getTableName(), midway, null); assertTrue(a.compareTo(b) < 0); assertTrue(b.compareTo(a) > 0); assertEquals(a, a); assertTrue(a.compareTo(a) == 0); - a = new HRegionInfo(t.getName(), Bytes.toBytes("a"), Bytes.toBytes("d")); - b = new HRegionInfo(t.getName(), Bytes.toBytes("e"), Bytes.toBytes("g")); + a = new HRegionInfo(t.getTableName(), Bytes.toBytes("a"), Bytes.toBytes("d")); + b = new HRegionInfo(t.getTableName(), Bytes.toBytes("e"), Bytes.toBytes("g")); assertTrue(a.compareTo(b) < 0); - a = new HRegionInfo(t.getName(), Bytes.toBytes("aaaa"), Bytes.toBytes("dddd")); - b = new HRegionInfo(t.getName(), Bytes.toBytes("e"), Bytes.toBytes("g")); + a = new HRegionInfo(t.getTableName(), Bytes.toBytes("aaaa"), Bytes.toBytes("dddd")); + b = new HRegionInfo(t.getTableName(), Bytes.toBytes("e"), Bytes.toBytes("g")); assertTrue(a.compareTo(b) < 0); - a = new HRegionInfo(t.getName(), Bytes.toBytes("aaaa"), Bytes.toBytes("dddd")); - b = new HRegionInfo(t.getName(), Bytes.toBytes("aaaa"), Bytes.toBytes("eeee")); + a = new HRegionInfo(t.getTableName(), Bytes.toBytes("aaaa"), Bytes.toBytes("dddd")); + b = new HRegionInfo(t.getTableName(), Bytes.toBytes("aaaa"), Bytes.toBytes("eeee")); assertTrue(a.compareTo(b) < 0); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java index ffb8984..c1723e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java @@ -74,7 +74,6 @@ public class TestDrainingServer { final List families = new ArrayList(1); families.add("family"); TEST_UTIL.createRandomTable("table", families, 1, 0, 0, COUNT_OF_REGIONS, 0); - // Ensure a stable env TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, false); @@ -306,7 +305,7 @@ public class TestDrainingServer { } private static boolean isAllRegionsOnline() { - return TEST_UTIL.getMiniHBaseCluster().countServedRegions() == - (COUNT_OF_REGIONS + 1 /*catalog regions*/); + return 
TEST_UTIL.getMiniHBaseCluster().countServedRegions() >= + (COUNT_OF_REGIONS + 2 /*catalog and namespace regions*/); } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java index 9381a12..b4085ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java @@ -40,7 +40,7 @@ public class TestFSTableDescriptorForceCreation { FileSystem fs = FileSystem.get(UTIL.getConfiguration()); Path rootdir = new Path(UTIL.getDataTestDir(), name); FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir); - HTableDescriptor htd = new HTableDescriptor(name); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); assertTrue("Should create new table descriptor", fstd.createTableDescriptor(htd, false)); } @@ -65,7 +65,7 @@ public class TestFSTableDescriptorForceCreation { FileSystem fs = FileSystem.get(UTIL.getConfiguration()); Path rootdir = new Path(UTIL.getDataTestDir(), name); FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir); - HTableDescriptor htd = new HTableDescriptor(name); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); fstd.createTableDescriptor(htd, false); assertTrue("Should create new table descriptor", fstd.createTableDescriptor(htd, true)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java index 19d0af7..82a132f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java @@ -63,7 +63,7 @@ public class TestHTableDescriptor { */ @Test public void testGetSetRemoveCP() throws Exception { - HTableDescriptor desc = new HTableDescriptor("table"); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table")); // simple CP String className = BaseRegionObserver.class.getName(); // add and check that it is present @@ -80,7 +80,7 @@ public class TestHTableDescriptor { */ @Test public void testSetListRemoveCP() throws Exception { - HTableDescriptor desc = new HTableDescriptor("testGetSetRemoveCP"); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("testGetSetRemoveCP")); // simple CP String className1 = BaseRegionObserver.class.getName(); String className2 = SampleRegionWALObserver.class.getName(); @@ -117,7 +117,7 @@ public class TestHTableDescriptor { */ @Test public void testRemoveString() throws Exception { - HTableDescriptor desc = new HTableDescriptor("table"); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table")); String key = "Some"; String value = "value"; desc.setValue(key, value); @@ -126,13 +126,18 @@ public class TestHTableDescriptor { assertEquals(null, desc.getValue(key)); } - String legalTableNames[] = { "foo", "with-dash_under.dot", "_under_start_ok", }; - String illegalTableNames[] = { ".dot_start_illegal", "-dash_start_illegal", "spaces not ok" }; + String legalTableNames[] = { "foo", "with-dash_under.dot", "_under_start_ok", + "with-dash.with_underscore", "02-01-2012.my_table_01-02", "xyz._mytable_", "9_9_0.table_02" + , "dot1.dot2.table", "new.-mytable", "with-dash.with.dot", "legal..t2", "legal..legal.t2", + "trailingdots..", "trailing.dots...", 
"ns:mytable", "ns:_mytable_", "ns:my_table_01-02"}; + String illegalTableNames[] = { ".dot_start_illegal", "-dash_start_illegal", "spaces not ok", + "-dash-.start_illegal", "new.table with space", "01 .table", "ns:-illegaldash", + "new:.illegaldot", "new:illegalcolon1:", "new:illegalcolon1:2"}; @Test public void testLegalHTableNames() { for (String tn : legalTableNames) { - HTableDescriptor.isLegalTableName(Bytes.toBytes(tn)); + TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(tn)); } } @@ -140,7 +145,7 @@ public class TestHTableDescriptor { public void testIllegalHTableNames() { for (String tn : illegalTableNames) { try { - HTableDescriptor.isLegalTableName(Bytes.toBytes(tn)); + TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(tn)); fail("invalid tablename " + tn + " should have failed"); } catch (Exception e) { // expected @@ -151,8 +156,9 @@ public class TestHTableDescriptor { @Test public void testLegalHTableNamesRegex() { for (String tn : legalTableNames) { - LOG.info("Testing: '" + tn + "'"); - assertTrue(Pattern.matches(HTableDescriptor.VALID_USER_TABLE_REGEX, tn)); + TableName tName = TableName.valueOf(tn); + assertTrue("Testing: '" + tn + "'", Pattern.matches(TableName.VALID_USER_TABLE_REGEX, + tName.getNameAsString())); } } @@ -160,7 +166,7 @@ public class TestHTableDescriptor { public void testIllegalHTableNamesRegex() { for (String tn : illegalTableNames) { LOG.info("Testing: '" + tn + "'"); - assertFalse(Pattern.matches(HTableDescriptor.VALID_USER_TABLE_REGEX, tn)); + assertFalse(Pattern.matches(TableName.VALID_USER_TABLE_REGEX, tn)); } } @@ -169,7 +175,7 @@ public class TestHTableDescriptor { */ @Test public void testGetMaxFileSize() { - HTableDescriptor desc = new HTableDescriptor("table"); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table")); assertEquals(-1, desc.getMaxFileSize()); desc.setMaxFileSize(1111L); assertEquals(1111L, desc.getMaxFileSize()); @@ -180,7 +186,7 @@ public class TestHTableDescriptor { */ @Test public void testGetMemStoreFlushSize() { - HTableDescriptor desc = new HTableDescriptor("table"); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table")); assertEquals(-1, desc.getMemStoreFlushSize()); desc.setMemStoreFlushSize(1111L); assertEquals(1111L, desc.getMemStoreFlushSize()); @@ -191,7 +197,7 @@ public class TestHTableDescriptor { */ @Test public void testAddGetRemoveConfiguration() throws Exception { - HTableDescriptor desc = new HTableDescriptor("table"); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table")); String key = "Some"; String value = "value"; desc.setConfiguration(key, value); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java index 3e8822c..f08f2fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java @@ -190,7 +190,8 @@ public class TestIOFencing { } private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private final static byte[] TABLE_NAME = Bytes.toBytes("tabletest"); + private final static TableName TABLE_NAME = + TableName.valueOf("tabletest"); private final static byte[] FAMILY = Bytes.toBytes("family"); private static final int FIRST_BATCH_COUNT = 4000; private static final int SECOND_BATCH_COUNT = FIRST_BATCH_COUNT; @@ -263,7 +264,7 @@ public class TestIOFencing { assertTrue(compactingRegion.countStoreFiles() > 1); final 
byte REGION_NAME[] = compactingRegion.getRegionName(); LOG.info("Asking for compaction"); - admin.majorCompact(TABLE_NAME); + admin.majorCompact(TABLE_NAME.getName()); LOG.info("Waiting for compaction to be about to start"); compactingRegion.waitForCompactionToBlock(); LOG.info("Starting a new server"); @@ -297,7 +298,7 @@ public class TestIOFencing { // If we survive the split keep going... // Now we make sure that the region isn't totally confused. Load up more rows. TEST_UTIL.loadNumericRows(table, FAMILY, FIRST_BATCH_COUNT, FIRST_BATCH_COUNT + SECOND_BATCH_COUNT); - admin.majorCompact(TABLE_NAME); + admin.majorCompact(TABLE_NAME.getName()); startWaitTime = System.currentTimeMillis(); while (newRegion.compactCount == 0) { Thread.sleep(1000); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java index 4bb53cc..d3f8406 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java @@ -65,7 +65,7 @@ public class TestInfoServers { @Test public void testInfoServersRedirect() throws Exception { // give the cluster time to start up - new HTable(UTIL.getConfiguration(), ".META.").close(); + new HTable(UTIL.getConfiguration(), TableName.META_TABLE_NAME).close(); int port = UTIL.getHBaseCluster().getMaster().getInfoServer().getPort(); assertContainsContent(new URL("http://localhost:" + port + "/index.html"), "master-status"); @@ -85,7 +85,7 @@ public class TestInfoServers { @Test public void testInfoServersStatusPages() throws Exception { // give the cluster time to start up - new HTable(UTIL.getConfiguration(), ".META.").close(); + new HTable(UTIL.getConfiguration(), TableName.META_TABLE_NAME).close(); int port = UTIL.getHBaseCluster().getMaster().getInfoServer().getPort(); assertContainsContent(new URL("http://localhost:" + port + "/master-status"), "META"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java index f1423f2..e7cbbba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMultiVersions.java @@ -92,12 +92,12 @@ public class TestMultiVersions { */ @Test public void testTimestamps() throws Exception { - HTableDescriptor desc = new HTableDescriptor("testTimestamps"); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("testTimestamps")); HColumnDescriptor hcd = new HColumnDescriptor(TimestampTestBase.FAMILY_NAME); hcd.setMaxVersions(3); desc.addFamily(hcd); this.admin.createTable(desc); - HTable table = new HTable(UTIL.getConfiguration(), desc.getName()); + HTable table = new HTable(UTIL.getConfiguration(), desc.getTableName()); // TODO: Remove these deprecated classes or pull them in here if this is // only test using them. 
Incommon incommon = new HTableIncommon(table); @@ -133,7 +133,7 @@ public class TestMultiVersions { final byte [] value2 = Bytes.toBytes("value2"); final long timestamp1 = 100L; final long timestamp2 = 200L; - final HTableDescriptor desc = new HTableDescriptor(tableName); + final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); HColumnDescriptor hcd = new HColumnDescriptor(contents); hcd.setMaxVersions(3); desc.addFamily(hcd); @@ -194,7 +194,7 @@ public class TestMultiVersions { @Test public void testScanMultipleVersions() throws Exception { final byte [] tableName = Bytes.toBytes("testScanMultipleVersions"); - final HTableDescriptor desc = new HTableDescriptor(tableName); + final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); final byte [][] rows = new byte[][] { Bytes.toBytes("row_0200"), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java new file mode 100644 index 0000000..90216d5 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java @@ -0,0 +1,309 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import com.google.common.collect.Sets; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; +import java.util.Set; + +import static org.junit.Assert.*; + +@Category(MediumTests.class) +public class TestNamespace { + protected static final Log LOG = LogFactory.getLog(TestNamespace.class); + private static HMaster master; + protected final static int NUM_SLAVES_BASE = 4; + private static HBaseTestingUtility TEST_UTIL; + protected static HBaseAdmin admin; + protected static HBaseCluster cluster; + private static ZKNamespaceManager zkNamespaceManager; + private String prefix = "TestNamespace"; + + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL = new HBaseTestingUtility(); + TEST_UTIL.getConfiguration().setInt("hbase.namespacejanitor.interval", 5000); + TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE); + admin = TEST_UTIL.getHBaseAdmin(); + cluster = TEST_UTIL.getHBaseCluster(); + master = ((MiniHBaseCluster)cluster).getMaster(); + zkNamespaceManager = + new ZKNamespaceManager(master.getZooKeeperWatcher()); + zkNamespaceManager.start(); + LOG.info("Done initializing cluster"); + } + + @AfterClass + public static void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void beforeMethod() throws IOException { + for (HTableDescriptor desc : admin.listTables(prefix+".*")) { + admin.disableTable(desc.getTableName()); + admin.deleteTable(desc.getTableName()); + } + for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) { + if (ns.getName().startsWith(prefix)) { + admin.deleteNamespace(ns.getName()); + } + } + } + + @Test + public void verifyReservedNS() throws IOException { + //verify existence of reserved namespaces + NamespaceDescriptor ns = + admin.getNamespaceDescriptor(NamespaceDescriptor.DEFAULT_NAMESPACE.getName()); + assertNotNull(ns); + assertEquals(ns.getName(), NamespaceDescriptor.DEFAULT_NAMESPACE.getName()); + assertNotNull(zkNamespaceManager.get(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR)); + + ns = admin.getNamespaceDescriptor(NamespaceDescriptor.SYSTEM_NAMESPACE.getName()); + assertNotNull(ns); + assertEquals(ns.getName(), NamespaceDescriptor.SYSTEM_NAMESPACE.getName()); + assertNotNull(zkNamespaceManager.get(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)); + + assertEquals(2, admin.listNamespaceDescriptors().length); + + //verify existence of system tables + Set systemTables = Sets.newHashSet( + TableName.META_TABLE_NAME, + TableName.NAMESPACE_TABLE_NAME); + HTableDescriptor[] descs = + admin.getTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE.getName()); + assertEquals(systemTables.size(), descs.length); + for (HTableDescriptor desc : descs) { + 
assertTrue(systemTables.contains(desc.getTableName())); + } + //verify system tables aren't listed + assertEquals(0, admin.listTables().length); + + //Try creating default and system namespaces. + boolean exceptionCaught = false; + try { + admin.createNamespace(NamespaceDescriptor.DEFAULT_NAMESPACE); + } catch (IOException exp) { + LOG.warn(exp); + exceptionCaught = true; + } finally { + assertTrue(exceptionCaught); + } + + exceptionCaught = false; + try { + admin.createNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE); + } catch (IOException exp) { + LOG.warn(exp); + exceptionCaught = true; + } finally { + assertTrue(exceptionCaught); + } + } + + @Test + public void testDeleteReservedNS() throws Exception { + boolean exceptionCaught = false; + try { + admin.deleteNamespace(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR); + } catch (IOException exp) { + LOG.warn(exp); + exceptionCaught = true; + } finally { + assertTrue(exceptionCaught); + } + + try { + admin.deleteNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); + } catch (IOException exp) { + LOG.warn(exp); + exceptionCaught = true; + } finally { + assertTrue(exceptionCaught); + } + } + + @Test + public void createRemoveTest() throws Exception { + String testName = "createRemoveTest"; + String nsName = prefix+"_"+testName; + LOG.info(testName); + + //create namespace and verify + admin.createNamespace(NamespaceDescriptor.create(nsName).build()); + assertEquals(3, admin.listNamespaceDescriptors().length); + TEST_UTIL.waitFor(60000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return zkNamespaceManager.list().size() == 3; + } + }); + assertNotNull(zkNamespaceManager.get(nsName)); + //remove namespace and verify + admin.deleteNamespace(nsName); + assertEquals(2, admin.listNamespaceDescriptors().length); + assertEquals(2, zkNamespaceManager.list().size()); + assertNull(zkNamespaceManager.get(nsName)); + } + + @Test + public void createDoubleTest() throws IOException, InterruptedException { + String testName = "createDoubleTest"; + String nsName = prefix+"_"+testName; + LOG.info(testName); + + byte[] tableName = Bytes.toBytes("my_table"); + byte[] tableNameFoo = Bytes.toBytes(nsName+".my_table"); + //create namespace and verify + admin.createNamespace(NamespaceDescriptor.create(nsName).build()); + TEST_UTIL.createTable(tableName, Bytes.toBytes(nsName)); + TEST_UTIL.createTable(tableNameFoo,Bytes.toBytes(nsName)); + assertEquals(2, admin.listTables().length); + assertNotNull(admin + .getTableDescriptor(tableName)); + assertNotNull(admin + .getTableDescriptor(tableNameFoo)); + //remove namespace and verify + admin.disableTable(tableName); + admin.deleteTable(tableName); + assertEquals(1, admin.listTables().length); + } + + @Test + public void createTableTest() throws IOException, InterruptedException { + String testName = "createTableTest"; + String nsName = prefix+"_"+testName; + LOG.info(testName); + + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(nsName+":my_table")); + HColumnDescriptor colDesc = new HColumnDescriptor("my_cf"); + desc.addFamily(colDesc); + try { + admin.createTable(desc); + fail("Expected no namespace constraint exception"); + } catch (ConstraintException ex) { + } + //create table and in new namespace + admin.createNamespace(NamespaceDescriptor.create(nsName).build()); + admin.createTable(desc); + TEST_UTIL.waitTableAvailable(desc.getTableName().getName(), 10000); + FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration()); + assertTrue(fs.exists( + new 
Path(master.getMasterFileSystem().getRootDir(), + new Path(HConstants.BASE_NAMESPACE_DIR, + new Path(nsName, desc.getTableName().getQualifierAsString()))))); + assertEquals(1, admin.listTables().length); + + //verify non-empty namespace can't be removed + try { + admin.deleteNamespace(nsName); + fail("Expected non-empty namespace constraint exception"); + } catch (Exception ex) { + LOG.info("Caught expected exception: " + ex); + } + + //sanity check try to write and read from table + HTable table = new HTable(TEST_UTIL.getConfiguration(), desc.getTableName()); + Put p = new Put(Bytes.toBytes("row1")); + p.add(Bytes.toBytes("my_cf"),Bytes.toBytes("my_col"),Bytes.toBytes("value1")); + table.put(p); + //flush and read from disk to make sure directory changes are working + admin.flush(desc.getTableName().getName()); + Get g = new Get(Bytes.toBytes("row1")); + assertTrue(table.exists(g)); + + //normal case of removing namespace + TEST_UTIL.deleteTable(desc.getTableName()); + admin.deleteNamespace(nsName); + } + + @Test + public void createTableInDefaultNamespace() throws Exception { + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("default_table")); + HColumnDescriptor colDesc = new HColumnDescriptor("cf1"); + desc.addFamily(colDesc); + admin.createTable(desc); + assertTrue(admin.listTables().length == 1); + admin.disableTable(desc.getTableName()); + admin.deleteTable(desc.getTableName()); + } + + @Test + public void createTableInSystemNamespace() throws Exception { + String tableName = "hbase:createTableInSystemNamespace"; + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); + HColumnDescriptor colDesc = new HColumnDescriptor("cf1"); + desc.addFamily(colDesc); + admin.createTable(desc); + assertEquals(0, admin.listTables().length); + assertTrue(admin.tableExists(Bytes.toBytes(tableName))); + admin.disableTable(desc.getTableName()); + admin.deleteTable(desc.getTableName()); + } + + @Test + public void testNamespaceJanitor() throws Exception { + FileSystem fs = TEST_UTIL.getTestFileSystem(); + + int fsCount = fs.listStatus(new Path(FSUtils.getRootDir(TEST_UTIL.getConfiguration()), + HConstants.BASE_NAMESPACE_DIR)).length; + Path fakeNSPath = + FSUtils.getNamespaceDir(FSUtils.getRootDir(TEST_UTIL.getConfiguration()), "foo"); + assertTrue(fs.mkdirs(fakeNSPath)); + + String fakeZnode = ZKUtil.joinZNode(ZooKeeperWatcher.namespaceZNode, "foo"); + int zkCount = ZKUtil.listChildrenNoWatch(TEST_UTIL.getZooKeeperWatcher(), + ZooKeeperWatcher.namespaceZNode).size(); + ZKUtil.createWithParents(TEST_UTIL.getZooKeeperWatcher(), fakeZnode); + Thread.sleep(10000); + + //verify namespace count is the same and orphan is removed + assertFalse(fs.exists(fakeNSPath)); + assertEquals(fsCount, fs.listStatus(new Path(FSUtils.getRootDir(TEST_UTIL.getConfiguration()), + HConstants.BASE_NAMESPACE_DIR)).length); + + assertEquals(-1, ZKUtil.checkExists(TEST_UTIL.getZooKeeperWatcher(), fakeZnode)); + assertEquals(zkCount, + ZKUtil.listChildrenNoWatch(TEST_UTIL.getZooKeeperWatcher(), + ZooKeeperWatcher.namespaceZNode).size()); + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java index 89bfd25..fe4dcb7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java @@ -81,7 +81,7 @@ public class TestRegionRebalancing { public void before() throws 
Exception { UTIL.getConfiguration().set("hbase.master.loadbalancer.class", this.balancerName); UTIL.startMiniCluster(1); - this.desc = new HTableDescriptor("test"); + this.desc = new HTableDescriptor(TableName.valueOf("test")); this.desc.addFamily(new HColumnDescriptor(FAMILY_NAME)); } @@ -97,7 +97,7 @@ public class TestRegionRebalancing { HBaseAdmin admin = new HBaseAdmin(UTIL.getConfiguration()); admin.createTable(this.desc, Arrays.copyOfRange(HBaseTestingUtility.KEYS, 1, HBaseTestingUtility.KEYS.length)); - this.table = new HTable(UTIL.getConfiguration(), this.desc.getName()); + this.table = new HTable(UTIL.getConfiguration(), this.desc.getTableName()); CatalogTracker ct = new CatalogTracker(UTIL.getConfiguration()); ct.start(); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java index 62b002f..d39c3fe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java @@ -97,7 +97,7 @@ public class TestSerialization { byte [] mb = Writables.getBytes(htd); HTableDescriptor deserializedHtd = (HTableDescriptor)Writables.getWritable(mb, new HTableDescriptor()); - assertEquals(htd.getNameAsString(), deserializedHtd.getNameAsString()); + assertEquals(htd.getTableName(), deserializedHtd.getTableName()); } /** @@ -136,12 +136,12 @@ public class TestSerialization { } private HRegionInfo createRandomRegion(final String name) { - HTableDescriptor htd = new HTableDescriptor(name); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); String [] families = new String [] {"info", "anchor"}; for (int i = 0; i < families.length; i++) { htd.addFamily(new HColumnDescriptor(families[i])); } - return new HRegionInfo(htd.getName(), HConstants.EMPTY_START_ROW, + return new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); } @@ -543,7 +543,7 @@ public class TestSerialization { */ protected HTableDescriptor createTableDescriptor(final String name, final int versions) { - HTableDescriptor htd = new HTableDescriptor(name); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); htd.addFamily(new HColumnDescriptor(fam1) .setMaxVersions(versions) .setBlockCacheEnabled(false) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java index 4095ef7..482a9b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java @@ -232,8 +232,8 @@ public class TestZooKeeper { * @throws Exception */ private void testSanity(final String testName) throws Exception{ - String tableName = testName + "." 
+ System.currentTimeMillis(); - HTableDescriptor desc = new HTableDescriptor(tableName); + String tableName = testName + "_" + System.currentTimeMillis(); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); HColumnDescriptor family = new HColumnDescriptor("fam"); desc.addFamily(family); LOG.info("Creating table " + tableName); @@ -258,10 +258,10 @@ public class TestZooKeeper { public void testMultipleZK() throws IOException, NoSuchMethodException, InvocationTargetException, IllegalAccessException { HTable localMeta = - new HTable(new Configuration(TEST_UTIL.getConfiguration()), HConstants.META_TABLE_NAME); + new HTable(new Configuration(TEST_UTIL.getConfiguration()), TableName.META_TABLE_NAME); Configuration otherConf = new Configuration(TEST_UTIL.getConfiguration()); otherConf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1"); - HTable ipMeta = new HTable(otherConf, HConstants.META_TABLE_NAME); + HTable ipMeta = new HTable(otherConf, TableName.META_TABLE_NAME); // dummy, just to open the connection final byte [] row = new byte [] {'r'}; @@ -483,7 +483,7 @@ public class TestZooKeeper { Bytes.toBytes("c"), Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"), Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"), Bytes.toBytes("j") }; String tableName = "testRegionAssignmentAfterMasterRecoveryDueToZKExpiry"; - admin.createTable(new HTableDescriptor(tableName), SPLIT_KEYS); + admin.createTable(new HTableDescriptor(TableName.valueOf(tableName)), SPLIT_KEYS); ZooKeeperWatcher zooKeeperWatcher = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL); ZKAssign.blockUntilNoRIT(zooKeeperWatcher); m.getZooKeeperWatcher().close(); @@ -519,7 +519,7 @@ public class TestZooKeeper { Bytes.toBytes("3"), Bytes.toBytes("4"), Bytes.toBytes("5") }; String tableName = "testLogSplittingAfterMasterRecoveryDueToZKExpiry"; - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); HColumnDescriptor hcd = new HColumnDescriptor("col"); htd.addFamily(hcd); admin.createTable(htd, SPLIT_KEYS); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java index 3ad628a..49d2324 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MediumTests; @@ -112,7 +113,8 @@ public class TestHFileArchiving { @Test public void testRemovesRegionDirOnArchive() throws Exception { - byte[] TABLE_NAME = Bytes.toBytes("testRemovesRegionDirOnArchive"); + TableName TABLE_NAME = + TableName.valueOf("testRemovesRegionDirOnArchive"); UTIL.createTable(TABLE_NAME, TEST_FAM); final HBaseAdmin admin = UTIL.getHBaseAdmin(); @@ -162,7 +164,8 @@ public class TestHFileArchiving { */ @Test public void testDeleteRegionWithNoStoreFiles() throws Exception { - byte[] TABLE_NAME = Bytes.toBytes("testDeleteRegionWithNoStoreFiles"); + TableName TABLE_NAME = + TableName.valueOf("testDeleteRegionWithNoStoreFiles"); UTIL.createTable(TABLE_NAME, TEST_FAM); // get the current store 
files for the region @@ -210,7 +213,8 @@ public class TestHFileArchiving { @Test public void testArchiveOnTableDelete() throws Exception { - byte[] TABLE_NAME = Bytes.toBytes("testArchiveOnTableDelete"); + TableName TABLE_NAME = + TableName.valueOf("testArchiveOnTableDelete"); UTIL.createTable(TABLE_NAME, TEST_FAM); List servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME); @@ -288,7 +292,8 @@ public class TestHFileArchiving { */ @Test public void testArchiveOnTableFamilyDelete() throws Exception { - byte[] TABLE_NAME = Bytes.toBytes("testArchiveOnTableFamilyDelete"); + TableName TABLE_NAME = + TableName.valueOf("testArchiveOnTableFamilyDelete"); UTIL.createTable(TABLE_NAME, TEST_FAM); List servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME); @@ -342,7 +347,8 @@ public class TestHFileArchiving { FileSystem fs = UTIL.getTestFileSystem(); Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY); - Path regionDir = new Path("table", "abcdef"); + Path regionDir = new Path(FSUtils.getTableDir(new Path("./"), + TableName.valueOf("table")), "abcdef"); Path familyDir = new Path(regionDir, "cf"); Path sourceRegionDir = new Path(rootDir, regionDir); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java index cc953ea..8163ea8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java @@ -30,6 +30,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -326,10 +327,10 @@ public class TestCatalogTracker { // Make it so we return any old location when asked. final HRegionLocation anyLocation = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, SN); - Mockito.when(connection.getRegionLocation((byte[]) Mockito.any(), + Mockito.when(connection.getRegionLocation((TableName) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean())). thenReturn(anyLocation); - Mockito.when(connection.locateRegion((byte[]) Mockito.any(), + Mockito.when(connection.locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any())). 
thenReturn(anyLocation); if (admin != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java index 0e39be8..0bbb9f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaMigrationConvertingToPB.java @@ -36,6 +36,8 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.migration.NamespaceUpgrade; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -51,6 +53,7 @@ import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.util.ToolRunner; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -118,6 +121,12 @@ public class TestMetaMigrationConvertingToPB { new Path(hbaseRootDir, ".META.").toString()}); // See whats in minihdfs. doFsCommand(shell, new String [] {"-lsr", "/"}); + + //upgrade to namespace as well + Configuration toolConf = TEST_UTIL.getConfiguration(); + conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDefaultRootDirPath().toString()); + ToolRunner.run(toolConf, new NamespaceUpgrade(), new String[]{"--upgrade"}); + TEST_UTIL.startMiniHBaseCluster(1, 1); // Assert we are running against the copied-up filesystem. The copied-up // rootdir should have had a table named 'TestTable' in it. Assert it @@ -182,7 +191,7 @@ public class TestMetaMigrationConvertingToPB { public void testMetaMigration() throws Exception { LOG.info("Starting testMetaMigration"); final byte [] FAMILY = Bytes.toBytes("family"); - HTableDescriptor htd = new HTableDescriptor("testMetaMigration"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testMetaMigration")); HColumnDescriptor hcd = new HColumnDescriptor(FAMILY); htd.addFamily(hcd); Configuration conf = TEST_UTIL.getConfiguration(); @@ -190,7 +199,9 @@ public class TestMetaMigrationConvertingToPB { HConstants.EMPTY_START_ROW, Bytes.toBytes("region_a"), Bytes.toBytes("region_b")}; - createMultiRegionsWithWritableSerialization(conf, htd.getName(), regionNames); + createMultiRegionsWithWritableSerialization(conf, + htd.getTableName().getName(), + regionNames); CatalogTracker ct = TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker(); // Erase the current version of root meta for this test. @@ -228,14 +239,16 @@ public class TestMetaMigrationConvertingToPB { @Test public void testMasterCrashDuringMetaMigration() throws Exception { final byte[] FAMILY = Bytes.toBytes("family"); - HTableDescriptor htd = new HTableDescriptor("testMasterCrashDuringMetaMigration"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf + ("testMasterCrashDuringMetaMigration")); HColumnDescriptor hcd = new HColumnDescriptor(FAMILY); htd.addFamily(hcd); Configuration conf = TEST_UTIL.getConfiguration(); // Create 10 New regions. - createMultiRegionsWithPBSerialization(conf, htd.getName(), 10); + createMultiRegionsWithPBSerialization(conf, htd.getTableName().getName(), 10); // Create 10 Legacy regions. 
- createMultiRegionsWithWritableSerialization(conf, htd.getName(), 10); + createMultiRegionsWithWritableSerialization(conf, + htd.getTableName().getName(), 10); CatalogTracker ct = TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker(); // Erase the current version of root meta for this test. @@ -315,14 +328,21 @@ public class TestMetaMigrationConvertingToPB { return createMultiRegionsWithWritableSerialization(c, tableName, regionStartKeys); } + public int createMultiRegionsWithWritableSerialization(final Configuration c, + final byte[] tableName, byte [][] startKeys) + throws IOException { + return createMultiRegionsWithWritableSerialization(c, + TableName.valueOf(tableName), startKeys); + } + /** * Inserts multiple regions into META using Writable serialization instead of PB */ public int createMultiRegionsWithWritableSerialization(final Configuration c, - final byte[] tableName, byte [][] startKeys) + final TableName tableName, byte [][] startKeys) throws IOException { Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR); - HTable meta = new HTable(c, HConstants.META_TABLE_NAME); + HTable meta = new HTable(c, TableName.META_TABLE_NAME); List newRegions = new ArrayList(startKeys.length); @@ -388,8 +408,15 @@ public class TestMetaMigrationConvertingToPB { */ int createMultiRegionsWithPBSerialization(final Configuration c, final byte[] tableName, byte [][] startKeys) throws IOException { + return createMultiRegionsWithPBSerialization(c, + TableName.valueOf(tableName), startKeys); + } + + int createMultiRegionsWithPBSerialization(final Configuration c, + final TableName tableName, + byte [][] startKeys) throws IOException { Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR); - HTable meta = new HTable(c, HConstants.META_TABLE_NAME); + HTable meta = new HTable(c, TableName.META_TABLE_NAME); List newRegions = new ArrayList(startKeys.length); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java index 01ccbac..7ba5e4c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java @@ -31,6 +31,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -98,14 +99,14 @@ public class TestMetaReaderEditor { */ @Test public void testRetrying() throws IOException, InterruptedException { - final String name = "testRetrying"; + final TableName name = + TableName.valueOf("testRetrying"); LOG.info("Started " + name); - final byte [] nameBytes = Bytes.toBytes(name); - HTable t = UTIL.createTable(nameBytes, HConstants.CATALOG_FAMILY); + HTable t = UTIL.createTable(name, HConstants.CATALOG_FAMILY); int regionCount = UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY); // Test it works getting a region from just made user table. 
final List regions = - testGettingTableRegions(CT, nameBytes, regionCount); + testGettingTableRegions(CT, name, regionCount); MetaTask reader = new MetaTask(CT, "reader") { @Override void metaTask() throws Throwable { @@ -218,24 +219,24 @@ public class TestMetaReaderEditor { @Test public void testGetRegionsCatalogTables() throws IOException, InterruptedException { List regions = - MetaReader.getTableRegions(CT, HConstants.META_TABLE_NAME); + MetaReader.getTableRegions(CT, TableName.META_TABLE_NAME); assertTrue(regions.size() >= 1); assertTrue(MetaReader.getTableRegionsAndLocations(CT, - Bytes.toString(HConstants.META_TABLE_NAME)).size() >= 1); + TableName.META_TABLE_NAME).size() >= 1); } @Test public void testTableExists() throws IOException { - final String name = "testTableExists"; - final byte [] nameBytes = Bytes.toBytes(name); + final TableName name = + TableName.valueOf("testTableExists"); assertFalse(MetaReader.tableExists(CT, name)); - UTIL.createTable(nameBytes, HConstants.CATALOG_FAMILY); + UTIL.createTable(name, HConstants.CATALOG_FAMILY); assertTrue(MetaReader.tableExists(CT, name)); HBaseAdmin admin = UTIL.getHBaseAdmin(); admin.disableTable(name); admin.deleteTable(name); assertFalse(MetaReader.tableExists(CT, name)); assertTrue(MetaReader.tableExists(CT, - Bytes.toString(HConstants.META_TABLE_NAME))); + TableName.META_TABLE_NAME)); } @Test public void testGetRegion() throws IOException, InterruptedException { @@ -251,7 +252,8 @@ public class TestMetaReaderEditor { // Test for the optimization made in HBASE-3650 @Test public void testScanMetaForTable() throws IOException, InterruptedException { - final String name = "testScanMetaForTable"; + final TableName name = + TableName.valueOf("testScanMetaForTable"); LOG.info("Started " + name); /** Create 2 tables @@ -259,21 +261,22 @@ public class TestMetaReaderEditor { - testScanMetaForTablf **/ - UTIL.createTable(Bytes.toBytes(name), HConstants.CATALOG_FAMILY); + UTIL.createTable(name, HConstants.CATALOG_FAMILY); // name that is +1 greater than the first one (e+1=f) - byte[] greaterName = Bytes.toBytes("testScanMetaForTablf"); + TableName greaterName = + TableName.valueOf("testScanMetaForTablf"); UTIL.createTable(greaterName, HConstants.CATALOG_FAMILY); // Now make sure we only get the regions from 1 of the tables at a time - assertEquals(1, MetaReader.getTableRegions(CT, Bytes.toBytes(name)).size()); + assertEquals(1, MetaReader.getTableRegions(CT, name).size()); assertEquals(1, MetaReader.getTableRegions(CT, greaterName).size()); } private static List testGettingTableRegions(final CatalogTracker ct, - final byte [] nameBytes, final int regionCount) + final TableName name, final int regionCount) throws IOException, InterruptedException { - List regions = MetaReader.getTableRegions(ct, nameBytes); + List regions = MetaReader.getTableRegions(ct, name); assertEquals(regionCount, regions.size()); Pair pair = MetaReader.getRegion(ct, regions.get(0).getRegionName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java index 2ae82e0..a82e800 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java @@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Abortable; import 
org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -44,7 +45,6 @@ import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; @@ -197,9 +197,9 @@ public class TestMetaReaderEditorNoCluster { // The ugly format below comes of 'Important gotcha on spying real objects!' from // http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html Mockito.doReturn(anyLocation). - when(connection).locateRegion((byte[]) Mockito.any(), (byte[]) Mockito.any()); + when(connection).locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any()); Mockito.doReturn(anyLocation). - when(connection).getRegionLocation((byte[]) Mockito.any(), + when(connection).getRegionLocation((TableName) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean()); // Now shove our HRI implementation into the spied-upon connection. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java index 8c12def..b5026ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; @@ -83,7 +84,7 @@ public class HConnectionTestingUtility { * getRegionLocation is called on the mocked connection * @return Mock up a connection that returns a {@link Configuration} when * {@link HConnection#getConfiguration()} is called, a 'location' when - * {@link HConnection#getRegionLocation(byte[], byte[], boolean)} is called, + * {@link HConnection#getRegionLocation(org.apache.hadoop.hbase.TableName, byte[], boolean)} is called, * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when * {@link HConnection#getAdmin(ServerName)} is called, returns the passed * {@link ClientProtos.ClientService.BlockingInterface} instance when @@ -101,10 +102,10 @@ public class HConnectionTestingUtility { Mockito.doNothing().when(c).close(); // Make it so we return a particular location when asked. final HRegionLocation loc = new HRegionLocation(hri, sn); - Mockito.when(c.getRegionLocation((byte[]) Mockito.any(), + Mockito.when(c.getRegionLocation((TableName) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean())). thenReturn(loc); - Mockito.when(c.locateRegion((byte[]) Mockito.any(), (byte[]) Mockito.any())). + Mockito.when(c.locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any())). thenReturn(loc); if (admin != null) { // If a call to getAdmin, return this implementation. 
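For reference, the client-side pattern that the converted tests above and below keep repeating looks roughly like the sketch that follows. It is illustrative only and not part of the patch: it assumes the 0.95-era client API used throughout this diff (HBaseAdmin, HTable, NamespaceDescriptor, TableName.valueOf), and the namespace, table, and family names ("demo_ns", "demo_table", "cf") are made up for the example.

// Illustrative sketch (not part of the patch): namespace-aware table access via
// TableName instead of raw byte[]/String names. Names below are hypothetical.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class TableNameUsageSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // A namespace is created explicitly; the table is then addressed by a
      // TableName (namespace + qualifier) rather than a raw byte[] name.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
      TableName name = TableName.valueOf("demo_ns", "demo_table");

      HTableDescriptor desc = new HTableDescriptor(name);
      desc.addFamily(new HColumnDescriptor("cf"));
      admin.createTable(desc);

      // HTable accepts the TableName directly.
      HTable table = new HTable(conf, name);
      Put p = new Put(Bytes.toBytes("row1"));
      p.add(Bytes.toBytes("cf"), Bytes.toBytes("col"), Bytes.toBytes("value1"));
      table.put(p);
      boolean found = table.exists(new Get(Bytes.toBytes("row1")));
      System.out.println("row1 present: " + found);
      table.close();

      // Cleanup mirrors the tests: disable and delete the table, then the namespace.
      admin.disableTable(name);
      admin.deleteTable(name);
      admin.deleteNamespace("demo_ns");
    } finally {
      admin.close();
    }
  }
}

The only behavioural difference from the old byte[]-based calls is that the namespace travels explicitly with the TableName, which is exactly the substitution the remaining test diffs below apply.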
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index 87743af..fe5e294 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -52,12 +52,12 @@ import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.catalog.CatalogTracker; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; @@ -194,8 +194,8 @@ public class TestAdmin { exception = null; try { - HTableDescriptor htd = new HTableDescriptor(nonexistent); - this.admin.modifyTable(htd.getName(), htd); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(nonexistent)); + this.admin.modifyTable(htd.getTableName(), htd); } catch (IOException e) { exception = e; } @@ -205,13 +205,13 @@ public class TestAdmin { // nonexistent column family -- see if we get right exceptions. final String tableName = "testDeleteEditUnknownColumnFamilyAndOrTable" + System.currentTimeMillis(); - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); htd.addFamily(new HColumnDescriptor("cf")); this.admin.createTable(htd); try { exception = null; try { - this.admin.deleteColumn(htd.getName(), nonexistentHcd.getName()); + this.admin.deleteColumn(htd.getTableName(), nonexistentHcd.getName()); } catch (IOException e) { exception = e; } @@ -220,7 +220,7 @@ public class TestAdmin { exception = null; try { - this.admin.modifyColumn(htd.getName(), nonexistentHcd); + this.admin.modifyColumn(htd.getTableName(), nonexistentHcd); } catch (IOException e) { exception = e; } @@ -246,10 +246,10 @@ public class TestAdmin { get.addColumn(HConstants.CATALOG_FAMILY, qualifier); ht.get(get); - this.admin.disableTable(table); + this.admin.disableTable(ht.getName()); assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getZKTable().isDisabledTable( - Bytes.toString(table))); + ht.getName())); // Test that table is disabled get = new Get(row); @@ -264,7 +264,7 @@ public class TestAdmin { this.admin.enableTable(table); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getZKTable().isEnabledTable( - Bytes.toString(table))); + ht.getName())); // Test that table is enabled try { @@ -337,7 +337,7 @@ public class TestAdmin { assertEquals(numTables + 1, tables.length); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getZKTable().isEnabledTable( - "testCreateTable")); + TableName.valueOf("testCreateTable"))); } @Test @@ -345,7 +345,7 @@ public class TestAdmin { HColumnDescriptor fam1 = new HColumnDescriptor("fam1"); HColumnDescriptor fam2 = new HColumnDescriptor("fam2"); HColumnDescriptor fam3 = new HColumnDescriptor("fam3"); - HTableDescriptor htd = 
new HTableDescriptor("myTestTable"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("myTestTable")); htd.addFamily(fam1); htd.addFamily(fam2); htd.addFamily(fam3); @@ -382,7 +382,8 @@ public class TestAdmin { */ @Test public void testOnlineChangeTableSchema() throws IOException, InterruptedException { - final byte [] tableName = Bytes.toBytes("changeTableSchemaOnline"); + final TableName tableName = + TableName.valueOf("changeTableSchemaOnline"); TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setBoolean( "hbase.online.schema.update.enable", true); HTableDescriptor [] tables = admin.listTables(); @@ -558,7 +559,7 @@ public class TestAdmin { @Test public void testCreateTableNumberOfRegions() throws IOException, InterruptedException { byte[] tableName = Bytes.toBytes("testCreateTableNumberOfRegions"); - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc); HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName); @@ -567,7 +568,7 @@ public class TestAdmin { ht.close(); byte [] TABLE_2 = Bytes.add(tableName, Bytes.toBytes("_2")); - desc = new HTableDescriptor(TABLE_2); + desc = new HTableDescriptor(TableName.valueOf(TABLE_2)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc, new byte[][]{new byte[]{42}}); HTable ht2 = new HTable(TEST_UTIL.getConfiguration(), TABLE_2); @@ -576,7 +577,7 @@ public class TestAdmin { ht2.close(); byte [] TABLE_3 = Bytes.add(tableName, Bytes.toBytes("_3")); - desc = new HTableDescriptor(TABLE_3); + desc = new HTableDescriptor(TableName.valueOf(TABLE_3)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc, "a".getBytes(), "z".getBytes(), 3); HTable ht3 = new HTable(TEST_UTIL.getConfiguration(), TABLE_3); @@ -585,7 +586,7 @@ public class TestAdmin { ht3.close(); byte [] TABLE_4 = Bytes.add(tableName, Bytes.toBytes("_4")); - desc = new HTableDescriptor(TABLE_4); + desc = new HTableDescriptor(TableName.valueOf(TABLE_4)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); try { admin.createTable(desc, "a".getBytes(), "z".getBytes(), 2); @@ -595,7 +596,7 @@ public class TestAdmin { } byte [] TABLE_5 = Bytes.add(tableName, Bytes.toBytes("_5")); - desc = new HTableDescriptor(TABLE_5); + desc = new HTableDescriptor(TableName.valueOf(TABLE_5)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc, new byte[] {1}, new byte[] {127}, 16); HTable ht5 = new HTable(TEST_UTIL.getConfiguration(), TABLE_5); @@ -622,7 +623,7 @@ public class TestAdmin { }; int expectedRegions = splitKeys.length + 1; - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc, splitKeys); @@ -684,7 +685,7 @@ public class TestAdmin { byte [] TABLE_2 = Bytes.add(tableName, Bytes.toBytes("_2")); - desc = new HTableDescriptor(TABLE_2); + desc = new HTableDescriptor(TableName.valueOf(TABLE_2)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); admin.createTable(desc, startKey, endKey, expectedRegions); @@ -740,7 +741,7 @@ public class TestAdmin { byte [] TABLE_3 = Bytes.add(tableName, Bytes.toBytes("_3")); - desc = new 
HTableDescriptor(TABLE_3); + desc = new HTableDescriptor(TableName.valueOf(TABLE_3)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); admin.createTable(desc, startKey, endKey, expectedRegions); @@ -766,7 +767,7 @@ public class TestAdmin { }; byte [] TABLE_4 = Bytes.add(tableName, Bytes.toBytes("_4")); - desc = new HTableDescriptor(TABLE_4); + desc = new HTableDescriptor(TableName.valueOf(TABLE_4)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); HBaseAdmin ladmin = new HBaseAdmin(TEST_UTIL.getConfiguration()); try { @@ -782,7 +783,7 @@ public class TestAdmin { @Test public void testTableAvailableWithRandomSplitKeys() throws Exception { byte[] tableName = Bytes.toBytes("testTableAvailableWithRandomSplitKeys"); - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor("col")); byte[][] splitKeys = new byte[1][]; splitKeys = new byte [][] { @@ -799,7 +800,7 @@ public class TestAdmin { byte[] tableName = Bytes.toBytes("testCreateTableWithOnlyEmptyStartRow"); byte[][] splitKeys = new byte[1][]; splitKeys[0] = HConstants.EMPTY_BYTE_ARRAY; - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor("col")); try { admin.createTable(desc, splitKeys); @@ -815,7 +816,7 @@ public class TestAdmin { splitKeys[0] = "region1".getBytes(); splitKeys[1] = HConstants.EMPTY_BYTE_ARRAY; splitKeys[2] = "region2".getBytes(); - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor("col")); try { admin.createTable(desc, splitKeys); @@ -865,7 +866,7 @@ public class TestAdmin { new byte[] { 6, 6, 6 }, new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 } }; int expectedRegions = splitKeys.length + 1; - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc, splitKeys); HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName); @@ -880,7 +881,7 @@ public class TestAdmin { // Check the assignment. HTable metaTable = new HTable(TEST_UTIL.getConfiguration(), - HConstants.META_TABLE_NAME); + TableName.META_TABLE_NAME); List regionInfos = admin.getTableRegions(tableName); Map serverMap = new HashMap(); for (HRegionInfo hri : regionInfos) { @@ -944,12 +945,12 @@ public class TestAdmin { void splitTest(byte[] splitPoint, byte[][] familyNames, int[] rowCounts, int numVersions, int blockSize) throws Exception { + TableName tableName = TableName.valueOf("testForceSplit"); StringBuilder sb = new StringBuilder(); // Add tail to String so can see better in logs where a test is running. 
for (int i = 0; i < rowCounts.length; i++) { sb.append("_").append(Integer.toString(rowCounts[i])); } - byte [] tableName = Bytes.toBytes("testForceSplit" + sb.toString()); assertFalse(admin.tableExists(tableName)); final HTable table = TEST_UTIL.createTable(tableName, familyNames, numVersions, blockSize); @@ -996,7 +997,7 @@ public class TestAdmin { scanner.next(); // Split the table - this.admin.split(tableName, splitPoint); + this.admin.split(tableName.getName(), splitPoint); final AtomicInteger count = new AtomicInteger(0); Thread t = new Thread("CheckForSplit") { @@ -1097,9 +1098,10 @@ public class TestAdmin { @Test(timeout=300000) public void testEnableDisableAddColumnDeleteColumn() throws Exception { ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL); - byte [] tableName = Bytes.toBytes("testMasterAdmin"); + TableName tableName = TableName.valueOf("testMasterAdmin"); TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); - while (!ZKTableReadOnly.isEnabledTable(zkw, "testMasterAdmin")) { + while (!ZKTableReadOnly.isEnabledTable(zkw, + TableName.valueOf("testMasterAdmin"))) { Thread.sleep(10); } this.admin.disableTable(tableName); @@ -1124,26 +1126,17 @@ public class TestAdmin { public void testCreateBadTables() throws IOException { String msg = null; try { - this.admin.createTable(HTableDescriptor.ROOT_TABLEDESC); - } catch (IllegalArgumentException e) { - msg = e.toString(); - } - assertTrue("Unexcepted exception message " + msg, msg != null && - msg.startsWith(IllegalArgumentException.class.getName()) && - msg.contains(HTableDescriptor.ROOT_TABLEDESC.getNameAsString())); - msg = null; - try { this.admin.createTable(HTableDescriptor.META_TABLEDESC); - } catch(IllegalArgumentException e) { + } catch(TableExistsException e) { msg = e.toString(); } assertTrue("Unexcepted exception message " + msg, msg != null && - msg.startsWith(IllegalArgumentException.class.getName()) && - msg.contains(HTableDescriptor.META_TABLEDESC.getNameAsString())); + msg.startsWith(TableExistsException.class.getName()) && + msg.contains(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString())); // Now try and do concurrent creation with a bunch of threads. final HTableDescriptor threadDesc = - new HTableDescriptor("threaded_testCreateBadTables"); + new HTableDescriptor(TableName.valueOf("threaded_testCreateBadTables")); threadDesc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); int count = 10; Thread [] threads = new Thread [count]; @@ -1190,8 +1183,8 @@ public class TestAdmin { @Test public void testTableNameClash() throws Exception { String name = "testTableNameClash"; - admin.createTable(new HTableDescriptor(name + "SOMEUPPERCASE")); - admin.createTable(new HTableDescriptor(name)); + admin.createTable(new HTableDescriptor(TableName.valueOf(name + "SOMEUPPERCASE"))); + admin.createTable(new HTableDescriptor(TableName.valueOf(name))); // Before fix, below would fail throwing a NoServerForRegionException. 
new HTable(TEST_UTIL.getConfiguration(), name).close(); } @@ -1215,7 +1208,7 @@ public class TestAdmin { byte [] startKey = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; byte [] endKey = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; HBaseAdmin hbaseadmin = new HBaseAdmin(TEST_UTIL.getConfiguration()); - hbaseadmin.createTable(new HTableDescriptor(name), startKey, endKey, + hbaseadmin.createTable(new HTableDescriptor(TableName.valueOf(name)), startKey, endKey, expectedRegions); hbaseadmin.close(); } finally { @@ -1248,13 +1241,11 @@ public class TestAdmin { public void testTableNames() throws IOException { byte[][] illegalNames = new byte[][] { Bytes.toBytes("-bad"), - Bytes.toBytes(".bad"), - HConstants.ROOT_TABLE_NAME, - HConstants.META_TABLE_NAME + Bytes.toBytes(".bad") }; for (byte[] illegalName : illegalNames) { try { - new HTableDescriptor(illegalName); + new HTableDescriptor(TableName.valueOf(illegalName)); throw new IOException("Did not detect '" + Bytes.toString(illegalName) + "' as an illegal user table name"); } catch (IllegalArgumentException e) { @@ -1263,7 +1254,7 @@ public class TestAdmin { } byte[] legalName = Bytes.toBytes("g-oo.d"); try { - new HTableDescriptor(legalName); + new HTableDescriptor(TableName.valueOf(legalName)); } catch (IllegalArgumentException e) { throw new IOException("Legal user table name: '" + Bytes.toString(legalName) + "' caused IllegalArgumentException: " + @@ -1324,7 +1315,8 @@ public class TestAdmin { @Test public void testShouldCloseTheRegionBasedOnTheEncodedRegionName() throws Exception { - byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegion"); + TableName TABLENAME = + TableName.valueOf("TestHBACloseRegion"); createTableWithDefaultConf(TABLENAME); HRegionInfo info = null; @@ -1376,7 +1368,8 @@ public class TestAdmin { @Test public void testCloseRegionThatFetchesTheHRIFromMeta() throws Exception { - byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegion2"); + TableName TABLENAME = + TableName.valueOf("TestHBACloseRegion2"); createTableWithDefaultConf(TABLENAME); HRegionInfo info = null; @@ -1483,7 +1476,7 @@ public class TestAdmin { Configuration config = TEST_UTIL.getConfiguration(); HBaseAdmin admin = new HBaseAdmin(config); - HTableDescriptor htd = new HTableDescriptor(TABLENAME); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLENAME)); HColumnDescriptor hcd = new HColumnDescriptor("value"); htd.addFamily(hcd); @@ -1492,6 +1485,10 @@ public class TestAdmin { } private void createTableWithDefaultConf(byte[] TABLENAME) throws IOException { + createTableWithDefaultConf(TableName.valueOf(TABLENAME)); + } + + private void createTableWithDefaultConf(TableName TABLENAME) throws IOException { HTableDescriptor htd = new HTableDescriptor(TABLENAME); HColumnDescriptor hcd = new HColumnDescriptor("value"); htd.addFamily(hcd); @@ -1515,7 +1512,7 @@ public class TestAdmin { byte [] endKey = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }; - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc, startKey, endKey, expectedRegions); @@ -1622,10 +1619,10 @@ public class TestAdmin { throws IOException, InterruptedException { // When the META table can be opened, the region servers are running new HTable( - TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME).close(); + TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME).close(); // Create the test table and open it - HTableDescriptor desc = new 
HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc); HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName); @@ -1698,13 +1695,13 @@ public class TestAdmin { @Test public void testDisableCatalogTable() throws Exception { try { - this.admin.disableTable(".META."); + this.admin.disableTable(TableName.META_TABLE_NAME); fail("Expected to throw IllegalArgumentException"); } catch (IllegalArgumentException e) { } // Before the fix for HBASE-6146, the below table creation was failing as the META table // actually getting disabled by the disableTable() call. - HTableDescriptor htd = new HTableDescriptor("testDisableCatalogTable".getBytes()); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testDisableCatalogTable".getBytes())); HColumnDescriptor hcd = new HColumnDescriptor("cf1".getBytes()); htd.addFamily(hcd); TEST_UTIL.getHBaseAdmin().createTable(htd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java index d761b58..ba879ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; @@ -159,7 +160,8 @@ public class TestClientScannerRPCTimeout { } else { ScanResponse scanRes = super.scan(controller, request); String regionName = Bytes.toString(request.getRegion().getValue().toByteArray()); - if (!regionName.contains("-ROOT-") && !regionName.contains(".META.")) { + if (!regionName.contains(TableName.ROOT_TABLE_NAME.getNameAsString()) && + !regionName.contains(TableName.META_TABLE_NAME.getNameAsString())) { tableScannerId = scanRes.getScannerId(); } return scanRes; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java index ab96544..e5cbf35 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java @@ -29,6 +29,8 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.LargeTests; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; @@ -58,7 +60,7 @@ public class TestCloneSnapshotFromClient { private byte[] snapshotName2; private int snapshot0Rows; private int snapshot1Rows; - private byte[] tableName; + private TableName tableName; private HBaseAdmin admin; @BeforeClass @@ -89,7 
+91,7 @@ public class TestCloneSnapshotFromClient { this.admin = TEST_UTIL.getHBaseAdmin(); long tid = System.currentTimeMillis(); - tableName = Bytes.toBytes("testtb-" + tid); + tableName = TableName.valueOf("testtb-" + tid); emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid); snapshotName0 = Bytes.toBytes("snaptb0-" + tid); snapshotName1 = Bytes.toBytes("snaptb1-" + tid); @@ -147,13 +149,13 @@ public class TestCloneSnapshotFromClient { @Test public void testCloneSnapshot() throws IOException, InterruptedException { - byte[] clonedTableName = Bytes.toBytes("clonedtb-" + System.currentTimeMillis()); + TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis()); testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows); testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows); testCloneSnapshot(clonedTableName, emptySnapshot, 0); } - private void testCloneSnapshot(final byte[] tableName, final byte[] snapshotName, + private void testCloneSnapshot(final TableName tableName, final byte[] snapshotName, int snapshotRows) throws IOException, InterruptedException { // create a new table from snapshot admin.cloneSnapshot(snapshotName, tableName); @@ -162,13 +164,24 @@ public class TestCloneSnapshotFromClient { TEST_UTIL.deleteTable(tableName); } + @Test + public void testCloneSnapshotCrossNamespace() throws IOException, InterruptedException { + String nsName = "testCloneSnapshotCrossNamespace"; + admin.createNamespace(NamespaceDescriptor.create(nsName).build()); + TableName clonedTableName = + TableName.valueOf(nsName, "clonedtb-" + System.currentTimeMillis()); + testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows); + testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows); + testCloneSnapshot(clonedTableName, emptySnapshot, 0); + } + /** * Verify that tables created from the snapshot are still alive after source table deletion. 
*/ @Test public void testCloneLinksAfterDelete() throws IOException, InterruptedException { // Clone a table from the first snapshot - byte[] clonedTableName = Bytes.toBytes("clonedtb1-" + System.currentTimeMillis()); + TableName clonedTableName = TableName.valueOf("clonedtb1-" + System.currentTimeMillis()); admin.cloneSnapshot(snapshotName0, clonedTableName); SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows); @@ -177,7 +190,7 @@ public class TestCloneSnapshotFromClient { admin.snapshot(snapshotName2, clonedTableName); // Clone the snapshot of the cloned table - byte[] clonedTableName2 = Bytes.toBytes("clonedtb2-" + System.currentTimeMillis()); + TableName clonedTableName2 = TableName.valueOf("clonedtb2-" + System.currentTimeMillis()); admin.cloneSnapshot(snapshotName2, clonedTableName2); SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows); admin.disableTable(clonedTableName2); @@ -204,7 +217,7 @@ public class TestCloneSnapshotFromClient { SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows); // Clone a new table from cloned - byte[] clonedTableName3 = Bytes.toBytes("clonedtb3-" + System.currentTimeMillis()); + TableName clonedTableName3 = TableName.valueOf("clonedtb3-" + System.currentTimeMillis()); admin.cloneSnapshot(snapshotName2, clonedTableName3); SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName3, snapshot0Rows); @@ -217,6 +230,7 @@ public class TestCloneSnapshotFromClient { // ========================================================================== // Helpers // ========================================================================== + private void waitCleanerRun() throws InterruptedException { TEST_UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().choreForTesting(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 73fd6bf..56336f2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -51,6 +51,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -174,7 +175,7 @@ public class TestFromClientSide { HColumnDescriptor hcd = new HColumnDescriptor(FAMILY) .setKeepDeletedCells(true).setMaxVersions(3); - HTableDescriptor desc = new HTableDescriptor(TABLENAME); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLENAME)); desc.addFamily(hcd); TEST_UTIL.getHBaseAdmin().createTable(desc); Configuration c = TEST_UTIL.getConfiguration(); @@ -317,7 +318,8 @@ public class TestFromClientSide { @Test public void testRegionCachePreWarm() throws Exception { LOG.info("Starting testRegionCachePreWarm"); - final byte [] TABLENAME = Bytes.toBytes("testCachePrewarm"); + final TableName TABLENAME = + TableName.valueOf("testCachePrewarm"); Configuration conf = TEST_UTIL.getConfiguration(); // Set up test table: @@ -1212,7 +1214,7 @@ public class TestFromClientSide { // Null table name (should NOT work) try { - TEST_UTIL.createTable(null, FAMILY); + TEST_UTIL.createTable((TableName)null, 
FAMILY); fail("Creating a table with null name passed, should have failed"); } catch(Exception e) {} @@ -4081,7 +4083,7 @@ public class TestFromClientSide { for (int i = 0; i < tables.length && i < size; i++) { boolean found = false; for (int j = 0; j < ts.length; j++) { - if (Bytes.equals(ts[j].getName(), tables[i])) { + if (Bytes.equals(ts[j].getTableName().getName(), tables[i])) { found = true; break; } @@ -4217,7 +4219,7 @@ public class TestFromClientSide { // Test that attribute changes were applied desc = a.getTableDescriptor(); assertTrue("wrong table descriptor returned", - Bytes.compareTo(desc.getName(), tableAname) == 0); + Bytes.compareTo(desc.getTableName().getName(), tableAname) == 0); // check HTD attribute value = desc.getValue(attrName); assertFalse("missing HTD attribute value", value == null); @@ -4392,7 +4394,8 @@ public class TestFromClientSide { @Test public void testIncrementWithDeletes() throws Exception { LOG.info("Starting testIncrementWithDeletes"); - final byte [] TABLENAME = Bytes.toBytes("testIncrementWithDeletes"); + final TableName TABLENAME = + TableName.valueOf("testIncrementWithDeletes"); HTable ht = TEST_UTIL.createTable(TABLENAME, FAMILY); final byte[] COLUMN = Bytes.toBytes("column"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index 1d9e59d..e36ee4e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -31,6 +31,7 @@ import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionLocation; @@ -150,7 +151,8 @@ public class TestFromClientSide3 { TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3); String tableName = "testAdvancedConfigOverride"; - byte[] TABLE = Bytes.toBytes(tableName); + TableName TABLE = + TableName.valueOf(tableName); HTable hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10); HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); HConnection connection = HConnectionManager.getConnection(TEST_UTIL @@ -169,7 +171,7 @@ public class TestFromClientSide3 { server, regionName, FAMILY).size() > 1); // Issue a compaction request - admin.compact(TABLE); + admin.compact(TABLE.getName()); // poll wait for the compactions to happen for (int i = 0; i < 10 * 1000 / 40; ++i) { @@ -205,7 +207,7 @@ public class TestFromClientSide3 { performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 10); // Issue a compaction request - admin.compact(TABLE); + admin.compact(TABLE.getName()); // This time, the compaction request should not happen Thread.sleep(10 * 1000); @@ -229,7 +231,7 @@ public class TestFromClientSide3 { LOG.info("alter status finished"); // Issue a compaction request - admin.compact(TABLE); + admin.compact(TABLE.getName()); // poll wait for the compactions to happen for (int i = 0; i < 10 * 1000 / 40; ++i) { @@ -380,7 +382,7 @@ public class TestFromClientSide3 { public void testGetEmptyRow() throws Exception { //Create a table and put in 1 row HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); - HTableDescriptor desc = new HTableDescriptor(Bytes.toBytes("test")); + HTableDescriptor desc = new 
HTableDescriptor(TableName.valueOf(Bytes.toBytes("test"))); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); HTable table = new HTable(TEST_UTIL.getConfiguration(), "test"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java index fb7adf5..6a6c3d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.PleaseHoldException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos; import org.junit.Test; @@ -71,7 +72,8 @@ public class TestHBaseAdminNoCluster { // Mock up our admin Interfaces HBaseAdmin admin = new HBaseAdmin(configuration); try { - HTableDescriptor htd = new HTableDescriptor("testMasterMonitorCollableRetries"); + HTableDescriptor htd = + new HTableDescriptor(TableName.valueOf("testMasterMonitorCollableRetries")); // Pass any old htable descriptor; not important try { admin.createTable(htd, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java index ef54220..cbc3fc8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java @@ -42,6 +42,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -81,11 +82,16 @@ import com.google.common.collect.Lists; public class TestHCM { private static final Log LOG = LogFactory.getLog(TestHCM.class); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final byte[] TABLE_NAME = Bytes.toBytes("test"); - private static final byte[] TABLE_NAME1 = Bytes.toBytes("test1"); - private static final byte[] TABLE_NAME2 = Bytes.toBytes("test2"); - private static final byte[] TABLE_NAME3 = Bytes.toBytes("test3"); - private static final byte[] TABLE_NAME4 = Bytes.toBytes("test4"); + private static final TableName TABLE_NAME = + TableName.valueOf("test"); + private static final TableName TABLE_NAME1 = + TableName.valueOf("test1"); + private static final TableName TABLE_NAME2 = + TableName.valueOf("test2"); + private static final TableName TABLE_NAME3 = + TableName.valueOf("test3"); + private static final TableName TABLE_NAME4 = + TableName.valueOf("test4"); private static final byte[] FAM_NAM = Bytes.toBytes("f"); private static final byte[] ROW = Bytes.toBytes("bbb"); private static final byte[] ROW_X = Bytes.toBytes("xxx"); @@ -112,7 +118,8 @@ public class TestHCM { @Ignore ("Fails in IDEs: HBASE-9042") @Test(expected = RegionServerStoppedException.class) public void testClusterStatus() throws Exception { - byte[] tn = 
"testClusterStatus".getBytes(); + TableName tn = + TableName.valueOf("testClusterStatus"); byte[] cf = "cf".getBytes(); byte[] rk = "rk1".getBytes(); @@ -121,7 +128,7 @@ public class TestHCM { final ServerName sn = rs.getRegionServer().getServerName(); HTable t = TEST_UTIL.createTable(tn, cf); - TEST_UTIL.waitTableAvailable(tn); + TEST_UTIL.waitTableAvailable(tn.getName()); while(TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager(). getRegionStates().isRegionsInTransition()){ @@ -287,7 +294,7 @@ public class TestHCM { HTable table = new HTable(conf, TABLE_NAME); TEST_UTIL.createMultiRegions(table, FAM_NAM); - TEST_UTIL.waitUntilAllRegionsAssigned(table.getTableName()); + TEST_UTIL.waitUntilAllRegionsAssigned(table.getName()); Put put = new Put(ROW); put.add(FAM_NAM, ROW, ROW); table.put(put); @@ -295,9 +302,6 @@ public class TestHCM { (HConnectionManager.HConnectionImplementation)table.getConnection(); assertNotNull(conn.getCachedLocation(TABLE_NAME, ROW)); - assertNotNull(conn.getCachedLocation(TABLE_NAME.clone(), ROW.clone())); - assertNotNull(conn.getCachedLocation( - Bytes.toString(TABLE_NAME).getBytes() , Bytes.toString(ROW).getBytes())); final int nextPort = conn.getCachedLocation(TABLE_NAME, ROW).getPort() + 1; HRegionLocation loc = conn.getCachedLocation(TABLE_NAME, ROW); @@ -305,7 +309,7 @@ public class TestHCM { HConstants.LATEST_TIMESTAMP), HConstants.LATEST_TIMESTAMP); Assert.assertEquals(conn.getCachedLocation(TABLE_NAME, ROW).getPort(), nextPort); - conn.forceDeleteCachedLocation(TABLE_NAME.clone(), ROW.clone()); + conn.forceDeleteCachedLocation(TABLE_NAME, ROW.clone()); HRegionLocation rl = conn.getCachedLocation(TABLE_NAME, ROW); assertNull("What is this location?? " + rl, rl); @@ -317,6 +321,7 @@ public class TestHCM { put2.add(FAM_NAM, ROW, ROW); table.put(put2); assertNotNull(conn.getCachedLocation(TABLE_NAME, ROW)); + assertNotNull(conn.getCachedLocation(TableName.valueOf(TABLE_NAME.getName()), ROW.clone())); TEST_UTIL.getHBaseAdmin().setBalancerRunning(false, false); HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java index b81126f..2f0bf37 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java @@ -27,6 +27,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -64,7 +65,8 @@ public class TestHTableMultiplexer { @Test public void testHTableMultiplexer() throws Exception { - byte[] TABLE = Bytes.toBytes("testHTableMultiplexer"); + TableName TABLE = + TableName.valueOf("testHTableMultiplexer"); final int NUM_REGIONS = 10; final int VERSION = 3; List failedPuts; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java index 3c51bc8..4c1f518 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java @@ -261,7 +261,7 @@ public class TestHTablePool { admin.deleteTable(TABLENAME); 
} - HTableDescriptor tableDescriptor = new HTableDescriptor(TABLENAME); + HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(TABLENAME)); tableDescriptor.addFamily(new HColumnDescriptor("randomFamily")); admin.createTable(tableDescriptor); @@ -337,7 +337,7 @@ public class TestHTablePool { admin.deleteTable(TABLENAME); } - HTableDescriptor tableDescriptor = new HTableDescriptor(TABLENAME); + HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(TABLENAME)); tableDescriptor.addFamily(new HColumnDescriptor("randomFamily")); admin.createTable(tableDescriptor); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java index 30e3725..4576deb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTestConst; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.junit.Test; @@ -51,8 +52,8 @@ public class TestIntraRowPagination { byte [][] FAMILIES = HTestConst.makeNAscii(HTestConst.DEFAULT_CF_BYTES, 3); byte [][] QUALIFIERS = HTestConst.makeNAscii(HTestConst.DEFAULT_QUALIFIER_BYTES, 10); - HTableDescriptor htd = new HTableDescriptor(HTestConst.DEFAULT_TABLE_BYTES); - HRegionInfo info = new HRegionInfo(HTestConst.DEFAULT_TABLE_BYTES, null, null, false); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(HTestConst.DEFAULT_TABLE_BYTES)); + HRegionInfo info = new HRegionInfo(HTestConst.DEFAULT_TABLE, null, null, false); for (byte[] family : FAMILIES) { HColumnDescriptor hcd = new HColumnDescriptor(family); htd.addFamily(hcd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java index af6619b..38a9fcb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java @@ -34,6 +34,7 @@ import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -67,7 +68,8 @@ public class TestMetaScanner { public void testMetaScanner() throws Exception { LOG.info("Starting testMetaScanner"); setUp(); - final byte[] TABLENAME = Bytes.toBytes("testMetaScanner"); + final TableName TABLENAME = + TableName.valueOf("testMetaScanner"); final byte[] FAMILY = Bytes.toBytes("family"); TEST_UTIL.createTable(TABLENAME, FAMILY); Configuration conf = TEST_UTIL.getConfiguration(); @@ -121,7 +123,8 @@ public class TestMetaScanner { final long runtime = 30 * 1000; //30 sec LOG.info("Starting testConcurrentMetaScannerAndCatalogJanitor"); - final byte[] TABLENAME = Bytes.toBytes("testConcurrentMetaScannerAndCatalogJanitor"); + final TableName TABLENAME = + TableName.valueOf("testConcurrentMetaScannerAndCatalogJanitor"); final byte[] 
FAMILY = Bytes.toBytes("family"); TEST_UTIL.createTable(TABLENAME, FAMILY); final CatalogTracker catalogTracker = mock(CatalogTracker.class); @@ -139,7 +142,7 @@ public class TestMetaScanner { //select a random region HRegionInfo parent = regions.get(random.nextInt(regions.size())); - if (parent == null || !Bytes.equals(TABLENAME, parent.getTableName())) { + if (parent == null || !TABLENAME.equals(parent.getTableName())) { continue; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java index 6f4666b..c924a41 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java @@ -80,8 +80,9 @@ public class TestMultipleTimestamps { @Test public void testReseeksWithOneColumnMiltipleTimestamp() throws IOException { - byte [] TABLE = Bytes.toBytes("testReseeksWithOne" + - "ColumnMiltipleTimestamps"); + TableName TABLE = + TableName.valueOf("testReseeksWithOne" + + "ColumnMiltipleTimestamps"); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; @@ -121,8 +122,9 @@ public class TestMultipleTimestamps { @Test public void testReseeksWithMultipleColumnOneTimestamp() throws IOException { LOG.info("testReseeksWithMultipleColumnOneTimestamp"); - byte [] TABLE = Bytes.toBytes("testReseeksWithMultiple" + - "ColumnOneTimestamps"); + TableName TABLE = + TableName.valueOf("testReseeksWithMultiple" + + "ColumnOneTimestamps"); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; @@ -162,7 +164,8 @@ public class TestMultipleTimestamps { IOException { LOG.info("testReseeksWithMultipleColumnMultipleTimestamp"); - byte [] TABLE = Bytes.toBytes("testReseeksWithMultipleColumnMiltipleTimestamps"); + TableName TABLE = + TableName.valueOf("testReseeksWithMultipleColumnMiltipleTimestamps"); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; @@ -214,7 +217,8 @@ public class TestMultipleTimestamps { @Test public void testReseeksWithMultipleFiles() throws IOException { LOG.info("testReseeksWithMultipleFiles"); - byte [] TABLE = Bytes.toBytes("testReseeksWithMultipleFiles"); + TableName TABLE = + TableName.valueOf("testReseeksWithMultipleFiles"); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; @@ -282,7 +286,9 @@ public class TestMultipleTimestamps { public void testWithVersionDeletes(boolean flushTables) throws IOException { LOG.info("testWithVersionDeletes_"+ (flushTables ? "flush" : "noflush")); - byte [] TABLE = Bytes.toBytes("testWithVersionDeletes_" + (flushTables ? "flush" : "noflush")); + TableName TABLE = + TableName.valueOf("testWithVersionDeletes_" + (flushTables ? 
+ "flush" : "noflush")); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; @@ -315,7 +321,8 @@ public class TestMultipleTimestamps { public void testWithMultipleVersionDeletes() throws IOException { LOG.info("testWithMultipleVersionDeletes"); - byte [] TABLE = Bytes.toBytes("testWithMultipleVersionDeletes"); + TableName TABLE = + TableName.valueOf("testWithMultipleVersionDeletes"); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; @@ -340,7 +347,8 @@ public class TestMultipleTimestamps { @Test public void testWithColumnDeletes() throws IOException { - byte [] TABLE = Bytes.toBytes("testWithColumnDeletes"); + TableName TABLE = + TableName.valueOf("testWithColumnDeletes"); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; @@ -365,7 +373,8 @@ public class TestMultipleTimestamps { @Test public void testWithFamilyDeletes() throws IOException { - byte [] TABLE = Bytes.toBytes("testWithFamilyDeletes"); + TableName TABLE = + TableName.valueOf("testWithFamilyDeletes"); byte [] FAMILY = Bytes.toBytes("event_log"); byte [][] FAMILIES = new byte[][] { FAMILY }; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java index dad1674..86502e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java @@ -27,6 +27,7 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -62,7 +63,7 @@ public class TestRestoreSnapshotFromClient { private byte[] snapshotName2; private int snapshot0Rows; private int snapshot1Rows; - private byte[] tableName; + private TableName tableName; private HBaseAdmin admin; @BeforeClass @@ -93,7 +94,8 @@ public class TestRestoreSnapshotFromClient { this.admin = TEST_UTIL.getHBaseAdmin(); long tid = System.currentTimeMillis(); - tableName = Bytes.toBytes("testtb-" + tid); + tableName = + TableName.valueOf("testtb-" + tid); emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid); snapshotName0 = Bytes.toBytes("snaptb0-" + tid); snapshotName1 = Bytes.toBytes("snaptb1-" + tid); @@ -221,7 +223,8 @@ public class TestRestoreSnapshotFromClient { @Test public void testRestoreSnapshotOfCloned() throws IOException, InterruptedException { - byte[] clonedTableName = Bytes.toBytes("clonedtb-" + System.currentTimeMillis()); + TableName clonedTableName = + TableName.valueOf("clonedtb-" + System.currentTimeMillis()); admin.cloneSnapshot(snapshotName0, clonedTableName); SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows); admin.disableTable(clonedTableName); @@ -241,10 +244,10 @@ public class TestRestoreSnapshotFromClient { TEST_UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().choreForTesting(); } - private Set getFamiliesFromFS(final byte[] tableName) throws IOException { + private Set getFamiliesFromFS(final TableName tableName) throws IOException { MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); Set families = new HashSet(); - Path tableDir = 
HTableDescriptor.getTableDir(mfs.getRootDir(), tableName); + Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName); for (Path regionDir: FSUtils.getRegionDirs(mfs.getFileSystem(), tableDir)) { for (Path familyDir: FSUtils.getFamilyDirs(mfs.getFileSystem(), regionDir)) { families.add(familyDir.getName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java index dc7ee4e..30b682b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java @@ -26,6 +26,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -190,25 +191,26 @@ public class TestSnapshotCloneIndependence { HBaseAdmin admin = UTIL.getHBaseAdmin(); final long startTime = System.currentTimeMillis(); - final String localTableNameAsString = STRING_TABLE_NAME + startTime; + final TableName localTableName = + TableName.valueOf(STRING_TABLE_NAME + startTime); - HTable original = UTIL.createTable(Bytes.toBytes(localTableNameAsString), TEST_FAM); + HTable original = UTIL.createTable(localTableName, TEST_FAM); try { UTIL.loadTable(original, TEST_FAM); final int origTableRowCount = UTIL.countRows(original); // Take a snapshot - final String snapshotNameAsString = "snapshot_" + localTableNameAsString; + final String snapshotNameAsString = "snapshot_" + localTableName; byte[] snapshotName = Bytes.toBytes(snapshotNameAsString); - SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableNameAsString, TEST_FAM_STR, + SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR, snapshotNameAsString, rootDir, fs, online); if (!online) { - admin.enableTable(localTableNameAsString); + admin.enableTable(localTableName); } - byte[] cloneTableName = Bytes.toBytes("test-clone-" + localTableNameAsString); + byte[] cloneTableName = Bytes.toBytes("test-clone-" + localTableName); admin.cloneSnapshot(snapshotName, cloneTableName); HTable clonedTable = new HTable(UTIL.getConfiguration(), cloneTableName); @@ -267,23 +269,24 @@ public class TestSnapshotCloneIndependence { // Create a table HBaseAdmin admin = UTIL.getHBaseAdmin(); final long startTime = System.currentTimeMillis(); - final String localTableNameAsString = STRING_TABLE_NAME + startTime; - HTable original = UTIL.createTable(Bytes.toBytes(localTableNameAsString), TEST_FAM); + final TableName localTableName = + TableName.valueOf(STRING_TABLE_NAME + startTime); + HTable original = UTIL.createTable(localTableName, TEST_FAM); UTIL.loadTable(original, TEST_FAM); final int loadedTableCount = UTIL.countRows(original); System.out.println("Original table has: " + loadedTableCount + " rows"); - final String snapshotNameAsString = "snapshot_" + localTableNameAsString; + final String snapshotNameAsString = "snapshot_" + localTableName; // Create a snapshot - SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableNameAsString, TEST_FAM_STR, + SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR, snapshotNameAsString, rootDir, fs, online); if 
(!online) { - admin.enableTable(localTableNameAsString); + admin.enableTable(localTableName); } - byte[] cloneTableName = Bytes.toBytes("test-clone-" + localTableNameAsString); + byte[] cloneTableName = Bytes.toBytes("test-clone-" + localTableName); // Clone the snapshot byte[] snapshotName = Bytes.toBytes(snapshotNameAsString); @@ -291,8 +294,7 @@ public class TestSnapshotCloneIndependence { // Verify that region information is the same pre-split original.clearRegionCache(); - List originalTableHRegions = admin.getTableRegions(Bytes - .toBytes(localTableNameAsString)); + List originalTableHRegions = admin.getTableRegions(localTableName); final int originalRegionCount = originalTableHRegions.size(); final int cloneTableRegionCount = admin.getTableRegions(cloneTableName).size(); @@ -323,20 +325,21 @@ public class TestSnapshotCloneIndependence { // Create a table HBaseAdmin admin = UTIL.getHBaseAdmin(); final long startTime = System.currentTimeMillis(); - final String localTableNameAsString = STRING_TABLE_NAME + startTime; - HTable original = UTIL.createTable(Bytes.toBytes(localTableNameAsString), TEST_FAM); + final TableName localTableName = + TableName.valueOf(STRING_TABLE_NAME + startTime); + HTable original = UTIL.createTable(localTableName, TEST_FAM); UTIL.loadTable(original, TEST_FAM); - final String snapshotNameAsString = "snapshot_" + localTableNameAsString; + final String snapshotNameAsString = "snapshot_" + localTableName; // Create a snapshot - SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableNameAsString, TEST_FAM_STR, + SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR, snapshotNameAsString, rootDir, fs, online); if (!online) { - admin.enableTable(localTableNameAsString); + admin.enableTable(localTableName); } - byte[] cloneTableName = Bytes.toBytes("test-clone-" + localTableNameAsString); + byte[] cloneTableName = Bytes.toBytes("test-clone-" + localTableName); // Clone the snapshot byte[] snapshotName = Bytes.toBytes(snapshotNameAsString); @@ -346,11 +349,11 @@ public class TestSnapshotCloneIndependence { byte[] TEST_FAM_2 = Bytes.toBytes("fam2"); HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM_2); - admin.disableTable(localTableNameAsString); - admin.addColumn(localTableNameAsString, hcd); + admin.disableTable(localTableName); + admin.addColumn(localTableName, hcd); // Verify that it is not in the snapshot - admin.enableTable(localTableNameAsString); + admin.enableTable(localTableName); // get a description of the cloned table // get a list of its families diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java index c4be132..3565d5a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; -import java.io.IOException; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -30,6 +29,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import 
org.apache.hadoop.hbase.LargeTests; @@ -61,7 +61,8 @@ public class TestSnapshotFromClient { private static final int NUM_RS = 2; private static final String STRING_TABLE_NAME = "test"; private static final byte[] TEST_FAM = Bytes.toBytes("fam"); - private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME); + private static final TableName TABLE_NAME = + TableName.valueOf(STRING_TABLE_NAME); /** * Setup the config for the cluster @@ -121,14 +122,14 @@ public class TestSnapshotFromClient { byte[] snapshotName = Bytes.toBytes("metaSnapshot"); try { - admin.snapshot(snapshotName, HConstants.META_TABLE_NAME); + admin.snapshot(snapshotName, TableName.META_TABLE_NAME); fail("taking a snapshot of .META. should not be allowed"); } catch (IllegalArgumentException e) { // expected } try { - admin.snapshot(snapshotName, HConstants.ROOT_TABLE_NAME); + admin.snapshot(snapshotName, TableName.ROOT_TABLE_NAME); fail("taking a snapshot of -ROOT- should not be allowed"); } catch (IllegalArgumentException e) { // expected diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java index 13f57dc..1ea5d1a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotMetadata.java @@ -27,6 +27,7 @@ import static org.junit.Assert.assertTrue; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -82,8 +83,7 @@ public class TestSnapshotMetadata { private HBaseAdmin admin; private String originalTableDescription; private HTableDescriptor originalTableDescriptor; - private byte[] originalTableName; - private String originalTableNameAsString; + TableName originalTableName; private static FileSystem fs; private static Path rootDir; @@ -136,7 +136,7 @@ public class TestSnapshotMetadata { final long startTime = System.currentTimeMillis(); final String sourceTableNameAsString = STRING_TABLE_NAME + startTime; - originalTableName = Bytes.toBytes(sourceTableNameAsString); + originalTableName = TableName.valueOf(sourceTableNameAsString); // enable replication on a column family HColumnDescriptor maxVersionsColumn = new HColumnDescriptor(MAX_VERSIONS_FAM); @@ -149,7 +149,7 @@ public class TestSnapshotMetadata { dataBlockColumn.setDataBlockEncoding(DATA_BLOCK_ENCODING_TYPE); blockSizeColumn.setBlocksize(BLOCK_SIZE); - HTableDescriptor htd = new HTableDescriptor(sourceTableNameAsString); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(sourceTableNameAsString)); htd.addFamily(maxVersionsColumn); htd.addFamily(bloomFilterColumn); htd.addFamily(dataBlockColumn); @@ -160,8 +160,7 @@ public class TestSnapshotMetadata { admin.createTable(htd); HTable original = new HTable(UTIL.getConfiguration(), originalTableName); - - originalTableNameAsString = sourceTableNameAsString; + originalTableName = TableName.valueOf(sourceTableNameAsString); originalTableDescriptor = admin.getTableDescriptor(originalTableName); originalTableDescription = originalTableDescriptor.toStringCustomizedValues(); @@ -175,9 +174,9 @@ public class TestSnapshotMetadata { @Test (timeout=300000) public void testDescribeMatchesAfterClone() throws Exception { 
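TestSnapshotFromClient now compares against the TableName.META_TABLE_NAME and TableName.ROOT_TABLE_NAME constants instead of the old byte[] constants from HConstants when asserting that catalog tables cannot be snapshotted. A minimal sketch of the same comparison, assuming only the constants shown in the patch (isCatalogTable is an illustrative helper, not an HBase API):

import org.apache.hadoop.hbase.TableName;

public class CatalogTableCheck {
  // Illustrative helper: reject catalog tables by comparing TableName values
  // rather than hard-coded ".META." / "-ROOT-" byte arrays.
  static boolean isCatalogTable(TableName tn) {
    return TableName.META_TABLE_NAME.equals(tn) || TableName.ROOT_TABLE_NAME.equals(tn);
  }

  public static void main(String[] args) {
    System.out.println(isCatalogTable(TableName.META_TABLE_NAME));  // true
    System.out.println(isCatalogTable(TableName.valueOf("test")));  // false
  }
}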
// Clone the original table - final String clonedTableNameAsString = "clone" + originalTableNameAsString; + final String clonedTableNameAsString = "clone" + originalTableName; final byte[] clonedTableName = Bytes.toBytes(clonedTableNameAsString); - final String snapshotNameAsString = "snapshot" + originalTableNameAsString + final String snapshotNameAsString = "snapshot" + originalTableName + System.currentTimeMillis(); final byte[] snapshotName = Bytes.toBytes(snapshotNameAsString); @@ -188,14 +187,14 @@ public class TestSnapshotMetadata { } // Create a snapshot in which all families are empty - SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableNameAsString, null, + SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableName, null, familiesList, snapshotNameAsString, rootDir, fs); admin.cloneSnapshot(snapshotName, clonedTableName); HTable clonedTable = new HTable(UTIL.getConfiguration(), clonedTableName); HTableDescriptor cloneHtd = admin.getTableDescriptor(clonedTableName); assertEquals( - originalTableDescription.replace(originalTableNameAsString, clonedTableNameAsString), + originalTableDescription.replace(originalTableName.getNameAsString(),clonedTableNameAsString), cloneHtd.toStringCustomizedValues()); // Verify the custom fields @@ -273,11 +272,11 @@ public class TestSnapshotMetadata { } // take a snapshot - final String snapshotNameAsString = "snapshot" + originalTableNameAsString + final String snapshotNameAsString = "snapshot" + originalTableName + System.currentTimeMillis(); final byte[] snapshotName = Bytes.toBytes(snapshotNameAsString); - SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableNameAsString, + SnapshotTestingUtils.createSnapshotAndValidate(admin, originalTableName, familiesWithDataList, emptyFamiliesList, snapshotNameAsString, rootDir, fs); admin.enableTable(originalTableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraint.java index e0f2569..4e1ece04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraint.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; @@ -69,7 +70,7 @@ public class TestConstraint { public void testConstraintPasses() throws Exception { // create the table // it would be nice if this was also a method on the util - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); for (byte[] family : new byte[][] { dummy, test }) { desc.addFamily(new HColumnDescriptor(family)); } @@ -99,7 +100,7 @@ public class TestConstraint { // create the table // it would be nice if this was also a method on the util - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); for (byte[] family : new byte[][] { dummy, test }) { desc.addFamily(new HColumnDescriptor(family)); } @@ -137,7 +138,7 @@ public class TestConstraint { @Test public void 
testDisableConstraint() throws Throwable { // create the table - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); // add a family to the table for (byte[] family : new byte[][] { dummy, test }) { desc.addFamily(new HColumnDescriptor(family)); @@ -171,7 +172,7 @@ public class TestConstraint { @Test public void testDisableConstraints() throws Throwable { // create the table - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); // add a family to the table for (byte[] family : new byte[][] { dummy, test }) { desc.addFamily(new HColumnDescriptor(family)); @@ -202,7 +203,7 @@ public class TestConstraint { @Test public void testIsUnloaded() throws Exception { // create the table - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); // add a family to the table for (byte[] family : new byte[][] { dummy, test }) { desc.addFamily(new HColumnDescriptor(family)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraints.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraints.java index 777da49..a706318 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraints.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/constraint/TestConstraints.java @@ -27,6 +27,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.constraint.TestConstraint.CheckWasRunConstraint; import org.apache.hadoop.hbase.constraint.WorksConstraint.NameConstraint; @@ -43,7 +44,7 @@ public class TestConstraints { @SuppressWarnings("unchecked") @Test public void testSimpleReadWrite() throws Throwable { - HTableDescriptor desc = new HTableDescriptor("table"); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table")); Constraints.add(desc, WorksConstraint.class); List constraints = Constraints.getConstraints(desc, @@ -68,7 +69,7 @@ public class TestConstraints { @SuppressWarnings("unchecked") @Test public void testReadWriteWithConf() throws Throwable { - HTableDescriptor desc = new HTableDescriptor("table"); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table")); Constraints.add( desc, new Pair, Configuration>( @@ -101,7 +102,7 @@ public class TestConstraints { @SuppressWarnings("unchecked") @Test public void testEnableDisableRemove() throws Exception { - HTableDescriptor desc = new HTableDescriptor("table"); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table")); // check general enabling/disabling of constraints // first add a constraint Constraints.add(desc, AllPassConstraint.class); @@ -137,7 +138,7 @@ public class TestConstraints { @SuppressWarnings("unchecked") @Test public void testUpdateConstraint() throws Exception { - HTableDescriptor desc = new HTableDescriptor("table"); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table")); Constraints.add(desc, CheckConfigurationConstraint.class, CheckWasRunConstraint.class); Constraints.setConfiguration(desc, CheckConfigurationConstraint.class, @@ -163,7 +164,7 @@ public class TestConstraints { */ @Test public void testRemoveUnsetConstraint() 
throws Throwable { - HTableDescriptor desc = new HTableDescriptor("table"); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table")); Constraints.remove(desc); Constraints.remove(desc, AlsoWorks.class); } @@ -173,7 +174,7 @@ public class TestConstraints { Configuration conf = new Configuration(); conf.setBoolean("_ENABLED", false); conf.setLong("_PRIORITY", 10); - HTableDescriptor desc = new HTableDescriptor("table"); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table")); Constraints.add(desc, AlsoWorks.class, conf); Constraints.add(desc, WorksConstraint.class); assertFalse(Constraints.enabled(desc, AlsoWorks.class)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java index 130443d..a3dc051 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; +import org.apache.hadoop.hbase.util.Bytes; /** * Class for testing WALObserver coprocessor. @@ -86,7 +87,7 @@ implements WALObserver { HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException { boolean bypass = false; // check table name matches or not. - if (!Arrays.equals(HRegionInfo.getTableName(info.getRegionName()), this.tableName)) { + if (!Bytes.equals(info.getTableName().getName(), this.tableName)) { return bypass; } preWALWriteCalled = true; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java index d21a535..1e413d6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java @@ -27,7 +27,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.List; import java.util.Map; -import java.util.Arrays; import java.util.NavigableSet; import java.util.concurrent.atomic.AtomicInteger; @@ -289,7 +288,7 @@ public class SimpleRegionObserver extends BaseRegionObserver { assertNotNull(e.getRegion()); assertNotNull(get); assertNotNull(results); - if (Arrays.equals(e.getRegion().getTableDesc().getName(), + if (e.getRegion().getTableDesc().getTableName().equals( TestRegionObserverInterface.TEST_TABLE)) { boolean foundA = false; boolean foundB = false; @@ -321,7 +320,7 @@ public class SimpleRegionObserver extends BaseRegionObserver { assertNotNull(e); assertNotNull(e.getRegion()); assertNotNull(familyMap); - if (Arrays.equals(e.getRegion().getTableDesc().getName(), + if (e.getRegion().getTableDesc().getTableName().equals( TestRegionObserverInterface.TEST_TABLE)) { List cells = familyMap.get(TestRegionObserverInterface.A); assertNotNull(cells); @@ -355,7 +354,7 @@ public class SimpleRegionObserver extends BaseRegionObserver { assertNotNull(e.getRegion()); assertNotNull(familyMap); List cells = familyMap.get(TestRegionObserverInterface.A); - if (Arrays.equals(e.getRegion().getTableDesc().getName(), + if (e.getRegion().getTableDesc().getTableName().equals( TestRegionObserverInterface.TEST_TABLE)) { 
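The observer changes above show the two table-matching idioms available after the migration: keep a raw byte[] and bridge through TableName.getName(), as SampleRegionWALObserver does, or compare TableName values directly, as SimpleRegionObserver does. A small sketch contrasting them (class and method names are illustrative, not part of the patch):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class TableMatchSketch {
  // Bridge idiom: an observer configured with a byte[] name compares it
  // against the bytes exposed by TableName.getName().
  static boolean matchesBytes(TableName regionTable, byte[] expected) {
    return Bytes.equals(regionTable.getName(), expected);
  }

  // Typed idiom: compare TableName values directly.
  static boolean matchesTyped(TableName regionTable, TableName expected) {
    return regionTable.equals(expected);
  }

  public static void main(String[] args) {
    TableName t = TableName.valueOf("TestTable");
    System.out.println(matchesBytes(t, Bytes.toBytes("TestTable")));      // true
    System.out.println(matchesTyped(t, TableName.valueOf("TestTable")));  // true
  }
}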
assertNotNull(cells); assertNotNull(cells.get(0)); @@ -471,7 +470,7 @@ public class SimpleRegionObserver extends BaseRegionObserver { RegionCoprocessorEnvironment e = ctx.getEnvironment(); assertNotNull(e); assertNotNull(e.getRegion()); - if (Arrays.equals(e.getRegion().getTableDesc().getName(), + if (e.getRegion().getTableDesc().getTableName().equals( TestRegionObserverInterface.TEST_TABLE)) { assertNotNull(familyPaths); assertEquals(1,familyPaths.size()); @@ -489,7 +488,7 @@ public class SimpleRegionObserver extends BaseRegionObserver { RegionCoprocessorEnvironment e = ctx.getEnvironment(); assertNotNull(e); assertNotNull(e.getRegion()); - if (Arrays.equals(e.getRegion().getTableDesc().getName(), + if (e.getRegion().getTableDesc().getTableName().equals( TestRegionObserverInterface.TEST_TABLE)) { assertNotNull(familyPaths); assertEquals(1,familyPaths.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAggregateProtocol.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAggregateProtocol.java index f5ecae2..2dfcc00 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAggregateProtocol.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAggregateProtocol.java @@ -51,7 +51,8 @@ public class TestAggregateProtocol { /** * Creating the test infrastructure. */ - private static final byte[] TEST_TABLE = Bytes.toBytes("TestTable"); + private static final TableName TEST_TABLE = + TableName.valueOf("TestTable"); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); private static final byte[] TEST_MULTI_CQ = Bytes.toBytes("TestMultiCQ"); @@ -847,4 +848,4 @@ public class TestAggregateProtocol { std = aClient.std(TEST_TABLE, ci, scan); assertEquals(Double.NaN, std, 0); } -} \ No newline at end of file +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBigDecimalColumnInterpreter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBigDecimalColumnInterpreter.java index 391d4ba..06773bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBigDecimalColumnInterpreter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBigDecimalColumnInterpreter.java @@ -49,7 +49,8 @@ public class TestBigDecimalColumnInterpreter { /** * Creating the test infrastructure. 
*/ - private static final byte[] TEST_TABLE = Bytes.toBytes("TestTable"); + private static final TableName TEST_TABLE = + TableName.valueOf("TestTable"); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); private static final byte[] TEST_MULTI_CQ = Bytes.toBytes("TestMultiCQ"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java index d54b4b4..0b078c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.RegionLoad; import java.io.*; import java.util.*; -import java.util.jar.*; import org.junit.*; import org.junit.experimental.categories.Category; @@ -139,7 +138,7 @@ public class TestClassLoading { LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS2); // create a table that references the coprocessors - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); htd.addFamily(new HColumnDescriptor("test")); // without configuration values htd.setValue("COPROCESSOR$1", jarFileOnHDFS1.toString() + "|" + cpName1 + @@ -161,7 +160,7 @@ public class TestClassLoading { byte[] startKey = {10, 63}; byte[] endKey = {12, 43}; admin.createTable(htd, startKey, endKey, 4); - waitForTable(htd.getName()); + waitForTable(htd.getTableName()); // verify that the coprocessors were loaded boolean foundTableRegion=false; @@ -233,13 +232,13 @@ public class TestClassLoading { File jarFile = buildCoprocessorJar(cpName3); // create a table that references the jar - HTableDescriptor htd = new HTableDescriptor(cpName3); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(cpName3)); htd.addFamily(new HColumnDescriptor("test")); htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName3 + "|" + Coprocessor.PRIORITY_USER); HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); admin.createTable(htd); - waitForTable(htd.getName()); + waitForTable(htd.getTableName()); // verify that the coprocessor was loaded boolean found = false; @@ -259,13 +258,13 @@ public class TestClassLoading { File jarFile = buildCoprocessorJar(cpName4); // create a table that references the jar - HTableDescriptor htd = new HTableDescriptor(cpName4); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(cpName4)); htd.addFamily(new HColumnDescriptor("test")); htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName4 + "|" + Coprocessor.PRIORITY_USER); HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); admin.createTable(htd); - waitForTable(htd.getName()); + waitForTable(htd.getTableName()); // verify that the coprocessor was loaded correctly boolean found = false; @@ -307,7 +306,7 @@ public class TestClassLoading { " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v "; // create a table that references the jar - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); htd.addFamily(new HColumnDescriptor("test")); // add 3 coprocessors by setting htd attributes directly. 
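TestClassLoading now builds its descriptors with TableName.valueOf and waits on htd.getTableName() rather than the raw name bytes. A minimal sketch of that constructor change, not part of the patch, using an illustrative table name ("myTestTable") and family ("test"):

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class DescriptorConstructionSketch {
  public static void main(String[] args) {
    // Wrap the string name in a TableName before handing it to the descriptor.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("myTestTable"));
    htd.addFamily(new HColumnDescriptor("test"));
    // The typed name is available from the descriptor; the raw bytes remain
    // reachable through TableName.getName() where an older API still needs them.
    System.out.println(htd.getTableName().getNameAsString());  // myTestTable
    System.out.println(htd.getTableName().getName().length);   // 11
  }
}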
@@ -333,7 +332,7 @@ public class TestClassLoading { admin.deleteTable(tableName); } admin.createTable(htd); - waitForTable(htd.getName()); + waitForTable(htd.getTableName()); // verify that the coprocessor was loaded boolean found_2 = false, found_1 = false, found_3 = false, @@ -409,7 +408,7 @@ public class TestClassLoading { LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS); // create a table that references the coprocessors - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); htd.addFamily(new HColumnDescriptor("test")); // without configuration values htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 + @@ -425,7 +424,7 @@ public class TestClassLoading { admin.deleteTable(tableName); } admin.createTable(htd); - waitForTable(htd.getName()); + waitForTable(htd.getTableName()); // verify that the coprocessors were loaded boolean found1 = false, found2 = false, found2_k1 = false, @@ -542,9 +541,9 @@ public class TestClassLoading { assertEquals(loadedMasterCoprocessorsVerify, loadedMasterCoprocessors); } - private void waitForTable(byte[] name) throws InterruptedException, IOException { + private void waitForTable(TableName name) throws InterruptedException, IOException { // First wait until all regions are online - TEST_UTIL.waitTableEnabled(name); + TEST_UTIL.waitTableEnabled(name.getName()); // Now wait a bit longer for the coprocessor hosts to load the CPs Thread.sleep(1000); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java index 5501026..ee3cdb5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java @@ -29,6 +29,7 @@ import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -65,7 +66,8 @@ import static org.junit.Assert.fail; public class TestCoprocessorEndpoint { private static final Log LOG = LogFactory.getLog(TestCoprocessorEndpoint.class); - private static final byte[] TEST_TABLE = Bytes.toBytes("TestTable"); + private static final TableName TEST_TABLE = + TableName.valueOf("TestTable"); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); private static byte[] ROW = Bytes.toBytes("testRow"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java index d1a7f94..7d2f541 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java @@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestCase; import 
org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -52,7 +53,6 @@ import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.SplitTransaction; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.PairOfSameType; import org.junit.experimental.categories.Category; import org.mockito.Mockito; @@ -263,7 +263,8 @@ public class TestCoprocessorInterface extends HBaseTestCase { } public void testSharedData() throws IOException { - byte [] tableName = Bytes.toBytes("testtable"); + TableName tableName = + TableName.valueOf("testtable"); byte [][] families = { fam1, fam2, fam3 }; Configuration hc = initSplit(); @@ -340,7 +341,8 @@ public class TestCoprocessorInterface extends HBaseTestCase { } public void testCoprocessorInterface() throws IOException { - byte [] tableName = Bytes.toBytes("testtable"); + TableName tableName = + TableName.valueOf("testtable"); byte [][] families = { fam1, fam2, fam3 }; Configuration hc = initSplit(); @@ -417,7 +419,7 @@ public class TestCoprocessorInterface extends HBaseTestCase { return r; } - HRegion initHRegion (byte [] tableName, String callingMethod, + HRegion initHRegion (TableName tableName, String callingMethod, Configuration conf, Class [] implClasses, byte [][] families) throws IOException { HTableDescriptor htd = new HTableDescriptor(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java index 3818e47..2df18d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; @@ -80,7 +81,7 @@ public class TestMasterCoprocessorExceptionWithAbort { public void run() { // create a table : master coprocessor will throw an exception and not // catch it. 
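TestCoprocessorInterface's initHRegion helper now takes a TableName, matching the HRegionInfo constructors used elsewhere in this patch. A minimal sketch of building region metadata that way, not part of the patch (the table name is illustrative and the printed region name varies with the generated region id):

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;

public class RegionInfoSketch {
  public static void main(String[] args) {
    TableName tn = TableName.valueOf("testtable");
    // One region covering the whole key space: null start/end keys, not split.
    HRegionInfo info = new HRegionInfo(tn, null, null, false);
    System.out.println(info.getTableName().getNameAsString());  // testtable
    System.out.println(info.getRegionNameAsString());           // testtable,,<region id>...
  }
}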
- HTableDescriptor htd = new HTableDescriptor(TEST_TABLE); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TEST_TABLE)); htd.addFamily(new HColumnDescriptor(TEST_FAMILY)); try { HBaseAdmin admin = UTIL.getHBaseAdmin(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java index 5c8afe1..b6fa096 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; @@ -183,7 +184,7 @@ public class TestMasterCoprocessorExceptionWithRemove { BuggyMasterObserver.class.getName(); assertTrue(master.getLoadedCoprocessors().contains(coprocessorName)); - HTableDescriptor htd1 = new HTableDescriptor(TEST_TABLE1); + HTableDescriptor htd1 = new HTableDescriptor(TableName.valueOf(TEST_TABLE1)); htd1.addFamily(new HColumnDescriptor(TEST_FAMILY1)); boolean threwDNRE = false; @@ -215,7 +216,7 @@ public class TestMasterCoprocessorExceptionWithRemove { // Verify that BuggyMasterObserver has been removed due to its misbehavior // by creating another table: should not have a problem this time. - HTableDescriptor htd2 = new HTableDescriptor(TEST_TABLE2); + HTableDescriptor htd2 = new HTableDescriptor(TableName.valueOf(TEST_TABLE2)); htd2.addFamily(new HColumnDescriptor(TEST_FAMILY2)); HBaseAdmin admin = UTIL.getHBaseAdmin(); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java index 3f0611d..c48b0d6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java @@ -73,6 +73,12 @@ public class TestMasterObserver { private boolean postDeleteTableCalled; private boolean preModifyTableCalled; private boolean postModifyTableCalled; + private boolean preCreateNamespaceCalled; + private boolean postCreateNamespaceCalled; + private boolean preDeleteNamespaceCalled; + private boolean postDeleteNamespaceCalled; + private boolean preModifyNamespaceCalled; + private boolean postModifyNamespaceCalled; private boolean preAddColumnCalled; private boolean postAddColumnCalled; private boolean preModifyColumnCalled; @@ -138,6 +144,12 @@ public class TestMasterObserver { postDeleteTableCalled = false; preModifyTableCalled = false; postModifyTableCalled = false; + preCreateNamespaceCalled = false; + postCreateNamespaceCalled = false; + preDeleteNamespaceCalled = false; + postDeleteNamespaceCalled = false; + preModifyNamespaceCalled = false; + postModifyNamespaceCalled = false; preAddColumnCalled = false; postAddColumnCalled = false; preModifyColumnCalled = false; @@ -215,7 +227,7 @@ public class TestMasterObserver { @Override public void preDeleteTable(ObserverContext env, - byte[] tableName) throws IOException 
{ + TableName tableName) throws IOException { if (bypass) { env.bypass(); } @@ -224,7 +236,7 @@ public class TestMasterObserver { @Override public void postDeleteTable(ObserverContext env, - byte[] tableName) throws IOException { + TableName tableName) throws IOException { postDeleteTableCalled = true; } @@ -238,7 +250,7 @@ public class TestMasterObserver { @Override public void preModifyTable(ObserverContext env, - byte[] tableName, HTableDescriptor htd) throws IOException { + TableName tableName, HTableDescriptor htd) throws IOException { if (bypass) { env.bypass(); }else{ @@ -249,7 +261,7 @@ public class TestMasterObserver { @Override public void postModifyTable(ObserverContext env, - byte[] tableName, HTableDescriptor htd) throws IOException { + TableName tableName, HTableDescriptor htd) throws IOException { postModifyTableCalled = true; } @@ -262,8 +274,77 @@ public class TestMasterObserver { } @Override + public void preCreateNamespace(ObserverContext env, + NamespaceDescriptor ns) throws IOException { + if (bypass) { + env.bypass(); + } + preCreateNamespaceCalled = true; + } + + @Override + public void postCreateNamespace(ObserverContext env, + NamespaceDescriptor ns) throws IOException { + postCreateNamespaceCalled = true; + } + + public boolean wasCreateNamespaceCalled() { + return preCreateNamespaceCalled && postCreateNamespaceCalled; + } + + public boolean preCreateNamespaceCalledOnly() { + return preCreateNamespaceCalled && !postCreateNamespaceCalled; + } + + @Override + public void preDeleteNamespace(ObserverContext env, + String name) throws IOException { + if (bypass) { + env.bypass(); + } + preDeleteNamespaceCalled = true; + } + + @Override + public void postDeleteNamespace(ObserverContext env, + String name) throws IOException { + postDeleteNamespaceCalled = true; + } + + public boolean wasDeleteNamespaceCalled() { + return preDeleteNamespaceCalled && postDeleteNamespaceCalled; + } + + public boolean preDeleteNamespaceCalledOnly() { + return preDeleteNamespaceCalled && !postDeleteNamespaceCalled; + } + + @Override + public void preModifyNamespace(ObserverContext env, + NamespaceDescriptor ns) throws IOException { + if (bypass) { + env.bypass(); + } + preModifyNamespaceCalled = true; + } + + @Override + public void postModifyNamespace(ObserverContext env, + NamespaceDescriptor ns) throws IOException { + postModifyNamespaceCalled = true; + } + + public boolean wasModifyNamespaceCalled() { + return preModifyNamespaceCalled && postModifyNamespaceCalled; + } + + public boolean preModifyNamespaceCalledOnly() { + return preModifyNamespaceCalled && !postModifyNamespaceCalled; + } + + @Override public void preAddColumn(ObserverContext env, - byte[] tableName, HColumnDescriptor column) throws IOException { + TableName tableName, HColumnDescriptor column) throws IOException { if (bypass) { env.bypass(); }else{ @@ -275,7 +356,7 @@ public class TestMasterObserver { @Override public void postAddColumn(ObserverContext env, - byte[] tableName, HColumnDescriptor column) throws IOException { + TableName tableName, HColumnDescriptor column) throws IOException { postAddColumnCalled = true; } @@ -289,7 +370,7 @@ public class TestMasterObserver { @Override public void preModifyColumn(ObserverContext env, - byte[] tableName, HColumnDescriptor descriptor) throws IOException { + TableName tableName, HColumnDescriptor descriptor) throws IOException { if (bypass) { env.bypass(); } @@ -298,7 +379,7 @@ public class TestMasterObserver { @Override public void postModifyColumn(ObserverContext env, - 
byte[] tableName, HColumnDescriptor descriptor) throws IOException { + TableName tableName, HColumnDescriptor descriptor) throws IOException { postModifyColumnCalled = true; } @@ -312,7 +393,7 @@ public class TestMasterObserver { @Override public void preDeleteColumn(ObserverContext env, - byte[] tableName, byte[] c) throws IOException { + TableName tableName, byte[] c) throws IOException { if (bypass) { env.bypass(); } @@ -321,7 +402,7 @@ public class TestMasterObserver { @Override public void postDeleteColumn(ObserverContext env, - byte[] tableName, byte[] c) throws IOException { + TableName tableName, byte[] c) throws IOException { postDeleteColumnCalled = true; } @@ -335,7 +416,7 @@ public class TestMasterObserver { @Override public void preEnableTable(ObserverContext env, - byte[] tableName) throws IOException { + TableName tableName) throws IOException { if (bypass) { env.bypass(); } @@ -344,7 +425,7 @@ public class TestMasterObserver { @Override public void postEnableTable(ObserverContext env, - byte[] tableName) throws IOException { + TableName tableName) throws IOException { postEnableTableCalled = true; } @@ -358,7 +439,7 @@ public class TestMasterObserver { @Override public void preDisableTable(ObserverContext env, - byte[] tableName) throws IOException { + TableName tableName) throws IOException { if (bypass) { env.bypass(); } @@ -367,7 +448,7 @@ public class TestMasterObserver { @Override public void postDisableTable(ObserverContext env, - byte[] tableName) throws IOException { + TableName tableName) throws IOException { postDisableTableCalled = true; } @@ -654,7 +735,7 @@ public class TestMasterObserver { @Override public void preDeleteTableHandler( - ObserverContext env, byte[] tableName) + ObserverContext env, TableName tableName) throws IOException { if (bypass) { env.bypass(); @@ -664,7 +745,7 @@ public class TestMasterObserver { @Override public void postDeleteTableHandler( - ObserverContext ctx, byte[] tableName) + ObserverContext ctx, TableName tableName) throws IOException { postDeleteTableHandlerCalled = true; } @@ -678,7 +759,7 @@ public class TestMasterObserver { } @Override public void preModifyTableHandler( - ObserverContext env, byte[] tableName, + ObserverContext env, TableName tableName, HTableDescriptor htd) throws IOException { if (bypass) { env.bypass(); @@ -688,7 +769,7 @@ public class TestMasterObserver { @Override public void postModifyTableHandler( - ObserverContext env, byte[] tableName, + ObserverContext env, TableName tableName, HTableDescriptor htd) throws IOException { postModifyTableHandlerCalled = true; } @@ -703,7 +784,7 @@ public class TestMasterObserver { @Override public void preAddColumnHandler( - ObserverContext env, byte[] tableName, + ObserverContext env, TableName tableName, HColumnDescriptor column) throws IOException { if (bypass) { env.bypass(); @@ -713,7 +794,7 @@ public class TestMasterObserver { @Override public void postAddColumnHandler( - ObserverContext ctx, byte[] tableName, + ObserverContext ctx, TableName tableName, HColumnDescriptor column) throws IOException { postAddColumnHandlerCalled = true; } @@ -727,7 +808,7 @@ public class TestMasterObserver { @Override public void preModifyColumnHandler( - ObserverContext env, byte[] tableName, + ObserverContext env, TableName tableName, HColumnDescriptor descriptor) throws IOException { if (bypass) { env.bypass(); @@ -737,7 +818,7 @@ public class TestMasterObserver { @Override public void postModifyColumnHandler( - ObserverContext ctx, byte[] tableName, + ObserverContext ctx, 
TableName tableName, HColumnDescriptor descriptor) throws IOException { postModifyColumnHandlerCalled = true; } @@ -751,7 +832,7 @@ public class TestMasterObserver { } @Override public void preDeleteColumnHandler( - ObserverContext env, byte[] tableName, + ObserverContext env, TableName tableName, byte[] c) throws IOException { if (bypass) { env.bypass(); @@ -761,7 +842,7 @@ public class TestMasterObserver { @Override public void postDeleteColumnHandler( - ObserverContext ctx, byte[] tableName, + ObserverContext ctx, TableName tableName, byte[] c) throws IOException { postDeleteColumnHandlerCalled = true; } @@ -776,7 +857,7 @@ public class TestMasterObserver { @Override public void preEnableTableHandler( - ObserverContext env, byte[] tableName) + ObserverContext env, TableName tableName) throws IOException { if (bypass) { env.bypass(); @@ -786,7 +867,7 @@ public class TestMasterObserver { @Override public void postEnableTableHandler( - ObserverContext ctx, byte[] tableName) + ObserverContext ctx, TableName tableName) throws IOException { postEnableTableHandlerCalled = true; } @@ -801,7 +882,7 @@ public class TestMasterObserver { @Override public void preDisableTableHandler( - ObserverContext env, byte[] tableName) + ObserverContext env, TableName tableName) throws IOException { if (bypass) { env.bypass(); @@ -811,7 +892,7 @@ public class TestMasterObserver { @Override public void postDisableTableHandler( - ObserverContext ctx, byte[] tableName) + ObserverContext ctx, TableName tableName) throws IOException { postDisableTableHandlerCalled = true; } @@ -826,7 +907,8 @@ public class TestMasterObserver { @Override public void preGetTableDescriptors(ObserverContext ctx, - List tableNamesList, List descriptors) throws IOException { + List tableNamesList, List descriptors) + throws IOException { preGetTableDescriptorsCalled = true; } @@ -843,7 +925,8 @@ public class TestMasterObserver { private static HBaseTestingUtility UTIL = new HBaseTestingUtility(); private static byte[] TEST_SNAPSHOT = Bytes.toBytes("observed_snapshot"); - private static byte[] TEST_TABLE = Bytes.toBytes("observed_table"); + private static TableName TEST_TABLE = + TableName.valueOf("observed_table"); private static byte[] TEST_CLONE = Bytes.toBytes("observed_clone"); private static byte[] TEST_FAMILY = Bytes.toBytes("fam1"); private static byte[] TEST_FAMILY2 = Bytes.toBytes("fam2"); @@ -1103,12 +1186,64 @@ public class TestMasterObserver { } } - private void modifyTableSync(HBaseAdmin admin, byte[] tableName, HTableDescriptor htd) + @Test + public void testNamespaceOperations() throws Exception { + MiniHBaseCluster cluster = UTIL.getHBaseCluster(); + String testNamespace = "observed_ns"; + HMaster master = cluster.getMaster(); + MasterCoprocessorHost host = master.getCoprocessorHost(); + CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor( + CPMasterObserver.class.getName()); + + cp.enableBypass(false); + cp.resetStates(); + + + // create a table + HBaseAdmin admin = UTIL.getHBaseAdmin(); + admin.createNamespace(NamespaceDescriptor.create(testNamespace).build()); + assertTrue("Test namespace should be created", cp.wasCreateNamespaceCalled()); + + assertNotNull(admin.getNamespaceDescriptor(testNamespace)); + + // turn off bypass, run the tests again + cp.enableBypass(true); + cp.resetStates(); + + admin.modifyNamespace(NamespaceDescriptor.create(testNamespace).build()); + assertTrue("Test namespace should not have been modified", + cp.preModifyNamespaceCalledOnly()); + + 
assertNotNull(admin.getNamespaceDescriptor(testNamespace)); + + admin.deleteNamespace(testNamespace); + assertTrue("Test namespace should not have been deleted", cp.preDeleteNamespaceCalledOnly()); + + assertNotNull(admin.getNamespaceDescriptor(testNamespace)); + + cp.enableBypass(false); + cp.resetStates(); + + // delete table + admin.modifyNamespace(NamespaceDescriptor.create(testNamespace).build()); + assertTrue("Test namespace should have been modified", cp.wasModifyNamespaceCalled()); + + admin.deleteNamespace(testNamespace); + assertTrue("Test namespace should have been deleted", cp.wasDeleteNamespaceCalled()); + + cp.enableBypass(true); + cp.resetStates(); + + admin.createNamespace(NamespaceDescriptor.create(testNamespace).build()); + assertTrue("Test namespace should not be created", cp.preCreateNamespaceCalledOnly()); + } + + private void modifyTableSync(HBaseAdmin admin, TableName tableName, HTableDescriptor htd) throws IOException { admin.modifyTable(tableName, htd); //wait until modify table finishes for (int t = 0; t < 100; t++) { //10 sec timeout - HTableDescriptor td = admin.getTableDescriptor(htd.getName()); + HTableDescriptor td = admin.getTableDescriptor(htd.getTableName()); if (td.equals(htd)) { break; } @@ -1228,7 +1363,7 @@ public class TestMasterObserver { cp.resetStates(); GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest((List)null); + RequestConverter.buildGetTableDescriptorsRequest((List)null); master.getTableDescriptors(null, req); assertTrue("Coprocessor should be called on table descriptors request", diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java index df01f38..dbea844 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; @@ -35,11 +36,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver; -import org.apache.hadoop.hbase.coprocessor.ObserverContext; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; -import org.apache.hadoop.hbase.util.Bytes; import org.junit.AfterClass; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -50,7 +47,8 @@ import org.junit.experimental.categories.Category; @Category(MediumTests.class) public class TestOpenTableInCoprocessor { - private static final byte[] otherTable = Bytes.toBytes("otherTable"); + private static final TableName otherTable = + TableName.valueOf("otherTable"); private static final byte[] family = new byte[] { 'f' }; private static boolean completed = false; @@ -83,7 +81,7 @@ public class TestOpenTableInCoprocessor { @Test public void testCoprocessorCanCreateConnectionToRemoteTable() throws Throwable { - HTableDescriptor primary = new HTableDescriptor("primary"); + HTableDescriptor primary = new 
HTableDescriptor(TableName.valueOf("primary")); primary.addFamily(new HColumnDescriptor(family)); // add our coprocessor primary.addCoprocessor(SendToOtherTableCoprocessor.class.getName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index 3aa40e6..e4bace3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -27,7 +27,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.lang.reflect.Method; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import org.apache.commons.logging.Log; @@ -36,6 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -76,7 +76,8 @@ import org.junit.experimental.categories.Category; public class TestRegionObserverInterface { static final Log LOG = LogFactory.getLog(TestRegionObserverInterface.class); - public static final byte[] TEST_TABLE = Bytes.toBytes("TestTable"); + public static final TableName TEST_TABLE = + TableName.valueOf("TestTable"); public final static byte[] A = Bytes.toBytes("a"); public final static byte[] B = Bytes.toBytes("b"); public final static byte[] C = Bytes.toBytes("c"); @@ -103,7 +104,7 @@ public class TestRegionObserverInterface { @Test public void testRegionObserver() throws IOException { - byte[] tableName = TEST_TABLE; + TableName tableName = TEST_TABLE; // recreate table every time in order to reset the status of the // coprocessor. 
HTable table = util.createTable(tableName, new byte[][] {A, B, C}); @@ -167,7 +168,7 @@ public class TestRegionObserverInterface { @Test public void testRowMutation() throws IOException { - byte[] tableName = TEST_TABLE; + TableName tableName = TEST_TABLE; HTable table = util.createTable(tableName, new byte[][] {A, B, C}); verifyMethodResult(SimpleRegionObserver.class, new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut", @@ -202,7 +203,7 @@ public class TestRegionObserverInterface { @Test public void testIncrementHook() throws IOException { - byte[] tableName = TEST_TABLE; + TableName tableName = TEST_TABLE; HTable table = util.createTable(tableName, new byte[][] {A, B, C}); Increment inc = new Increment(Bytes.toBytes(0)); @@ -228,7 +229,8 @@ public class TestRegionObserverInterface { @Test // HBase-3583 public void testHBase3583() throws IOException { - byte[] tableName = Bytes.toBytes("testHBase3583"); + TableName tableName = + TableName.valueOf("testHBase3583"); util.createTable(tableName, new byte[][] {A, B, C}); verifyMethodResult(SimpleRegionObserver.class, @@ -278,7 +280,8 @@ public class TestRegionObserverInterface { @Test // HBase-3758 public void testHBase3758() throws IOException { - byte[] tableName = Bytes.toBytes("testHBase3758"); + TableName tableName = + TableName.valueOf("testHBase3758"); util.createTable(tableName, new byte[][] {A, B, C}); verifyMethodResult(SimpleRegionObserver.class, @@ -389,7 +392,7 @@ public class TestRegionObserverInterface { admin.deleteTable(compactTable); } - HTableDescriptor htd = new HTableDescriptor(compactTable); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(compactTable)); htd.addFamily(new HColumnDescriptor(A)); htd.addCoprocessor(EvenOnlyCompactor.class.getName()); admin.createTable(htd); @@ -454,7 +457,7 @@ public class TestRegionObserverInterface { @Test public void bulkLoadHFileTest() throws Exception { String testName = TestRegionObserverInterface.class.getName()+".bulkLoadHFileTest"; - byte[] tableName = TEST_TABLE; + TableName tableName = TEST_TABLE; Configuration conf = util.getConfiguration(); HTable table = util.createTable(tableName, new byte[][] {A, B, C}); @@ -483,12 +486,12 @@ public class TestRegionObserverInterface { } // check each region whether the coprocessor upcalls are called or not. - private void verifyMethodResult(Class c, String methodName[], byte[] tableName, + private void verifyMethodResult(Class c, String methodName[], TableName tableName, Object value[]) throws IOException { try { for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) { for (HRegionInfo r : ProtobufUtil.getOnlineRegions(t.getRegionServer())) { - if (!Arrays.equals(r.getTableName(), tableName)) { + if (!r.getTableName().equals(tableName)) { continue; } RegionCoprocessorHost cph = t.getRegionServer().getOnlineRegion(r.getRegionName()). 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java index 07033a6..46c0e49 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; @@ -140,11 +141,11 @@ public class TestRegionObserverScannerOpenHook { HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf, byte[]... families) throws IOException { - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); for (byte[] family : families) { htd.addFamily(new HColumnDescriptor(family)); } - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); Path path = new Path(DIR + callingMethod); HRegion r = HRegion.createHRegion(info, path, conf, htd); // this following piece is a hack. currently a coprocessorHost @@ -220,7 +221,7 @@ public class TestRegionObserverScannerOpenHook { String tableName = "testRegionObserverCompactionTimeStacking"; byte[] ROW = Bytes.toBytes("testRow"); byte[] A = Bytes.toBytes("A"); - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(A)); desc.addCoprocessor(EmptyRegionObsever.class.getName(), null, Coprocessor.PRIORITY_USER, null); desc.addCoprocessor(NoDataFromCompaction.class.getName(), null, Coprocessor.PRIORITY_HIGHEST, @@ -229,7 +230,7 @@ public class TestRegionObserverScannerOpenHook { HBaseAdmin admin = UTIL.getHBaseAdmin(); admin.createTable(desc); - HTable table = new HTable(conf, desc.getName()); + HTable table = new HTable(conf, desc.getTableName()); // put a row and flush it to disk Put put = new Put(ROW); @@ -237,8 +238,8 @@ public class TestRegionObserverScannerOpenHook { table.put(put); table.flushCommits(); - HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getName()); - List regions = rs.getOnlineRegions(desc.getName()); + HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getTableName()); + List regions = rs.getOnlineRegions(desc.getTableName()); assertEquals("More than 1 region serving test table with 1 row", 1, regions.size()); HRegion region = regions.get(0); admin.flush(region.getRegionName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java index ba9c3bd..41dae08 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; 
import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -94,11 +95,11 @@ public class TestRegionObserverStacking extends TestCase { HRegion initHRegion (byte [] tableName, String callingMethod, Configuration conf, byte [] ... families) throws IOException { - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); for(byte [] family : families) { htd.addFamily(new HColumnDescriptor(family)); } - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); Path path = new Path(DIR + callingMethod); HRegion r = HRegion.createHRegion(info, path, conf, htd); // this following piece is a hack. currently a coprocessorHost @@ -112,7 +113,7 @@ public class TestRegionObserverStacking extends TestCase { public void testRegionObserverStacking() throws Exception { byte[] ROW = Bytes.toBytes("testRow"); - byte[] TABLE = Bytes.toBytes(getClass().getName()); + byte[] TABLE = Bytes.toBytes(this.getClass().getSimpleName()); byte[] A = Bytes.toBytes("A"); byte[][] FAMILIES = new byte[][] { A } ; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java index 5962ef4..160f104 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java @@ -49,7 +49,8 @@ import static org.junit.Assert.*; public class TestRegionServerCoprocessorExceptionWithAbort { static final Log LOG = LogFactory.getLog(TestRegionServerCoprocessorExceptionWithAbort.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final String TABLE_NAME = "observed_table"; + private static final TableName TABLE_NAME = + TableName.valueOf("observed_table"); @BeforeClass public static void setupBeforeClass() throws Exception { @@ -72,7 +73,7 @@ public class TestRegionServerCoprocessorExceptionWithAbort { // When we try to write to TEST_TABLE, the buggy coprocessor will // cause a NullPointerException, which will cause the regionserver (which // hosts the region we attempted to write to) to abort. 
- byte[] TEST_TABLE = Bytes.toBytes(TABLE_NAME); + TableName TEST_TABLE = TABLE_NAME; byte[] TEST_FAMILY = Bytes.toBytes("aaa"); HTable table = TEST_UTIL.createTable(TEST_TABLE, TEST_FAMILY); @@ -102,7 +103,8 @@ public class TestRegionServerCoprocessorExceptionWithAbort { public void prePut(final ObserverContext c, final Put put, final WALEdit edit, final Durability durability) { - String tableName = c.getEnvironment().getRegion().getRegionInfo().getTableNameAsString(); + TableName tableName = + c.getEnvironment().getRegion().getRegionInfo().getTableName(); if (TABLE_NAME.equals(tableName)) { throw new NullPointerException("Buggy coprocessor"); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithRemove.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithRemove.java index a122d48..6de987f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithRemove.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithRemove.java @@ -53,7 +53,8 @@ public class TestRegionServerCoprocessorExceptionWithRemove { final Put put, final WALEdit edit, final Durability durability) { String tableName = - c.getEnvironment().getRegion().getRegionInfo().getTableNameAsString(); + c.getEnvironment().getRegion().getRegionInfo() + .getTableName().getNameAsString(); if (tableName.equals("observed_table")) { Integer i = null; i = i + 1; @@ -88,7 +89,8 @@ public class TestRegionServerCoprocessorExceptionWithRemove { // execute, which will set the rsZKNodeDeleted flag to true, which will // pass this test. - byte[] TEST_TABLE = Bytes.toBytes("observed_table"); + TableName TEST_TABLE = + TableName.valueOf("observed_table"); byte[] TEST_FAMILY = Bytes.toBytes("aaa"); HTable table = TEST_UTIL.createTable(TEST_TABLE, TEST_FAMILY); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index 7a25881..37872c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -220,16 +220,16 @@ public class TestWALObserver { public void testWALCoprocessorReplay() throws Exception { // WAL replay is handled at HRegion::replayRecoveredEdits(), which is // ultimately called by HRegion::initialize() - byte[] tableName = Bytes.toBytes("testWALCoprocessorReplay"); - final HTableDescriptor htd = getBasic3FamilyHTableDescriptor(Bytes - .toString(tableName)); + TableName tableName = TableName.valueOf("testWALCoprocessorReplay"); + final HTableDescriptor htd = getBasic3FamilyHTableDescriptor(tableName); // final HRegionInfo hri = // createBasic3FamilyHRegionInfo(Bytes.toString(tableName)); // final HRegionInfo hri1 = // createBasic3FamilyHRegionInfo(Bytes.toString(tableName)); final HRegionInfo hri = new HRegionInfo(tableName, null, null); - final Path basedir = new Path(this.hbaseRootDir, Bytes.toString(tableName)); + final Path basedir = + FSUtils.getTableDir(this.hbaseRootDir, tableName); deleteDir(basedir); fs.mkdirs(new Path(basedir, hri.getEncodedName())); @@ -306,13 +306,13 @@ public class TestWALObserver { * @param tableName Name of table to use when we create HTableDescriptor. 
*/ private HRegionInfo createBasic3FamilyHRegionInfo(final String tableName) { - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); for (int i = 0; i < TEST_FAMILY.length; i++) { HColumnDescriptor a = new HColumnDescriptor(TEST_FAMILY[i]); htd.addFamily(a); } - return new HRegionInfo(htd.getName(), null, null, false); + return new HRegionInfo(htd.getTableName(), null, null, false); } /* @@ -367,7 +367,7 @@ public class TestWALObserver { return HLogFactory.createHLog(FileSystem.get(c), hbaseRootDir, logName, c); } - private void addWALEdits(final byte[] tableName, final HRegionInfo hri, + private void addWALEdits(final TableName tableName, final HRegionInfo hri, final byte[] rowName, final byte[] family, final int count, EnvironmentEdge ee, final HLog wal, final HTableDescriptor htd) throws IOException { @@ -383,7 +383,7 @@ public class TestWALObserver { } private HTableDescriptor getBasic3FamilyHTableDescriptor( - final String tableName) { + final TableName tableName) { HTableDescriptor htd = new HTableDescriptor(tableName); for (int i = 0; i < TEST_FAMILY.length; i++) { @@ -394,7 +394,7 @@ public class TestWALObserver { } private HTableDescriptor createBasic3FamilyHTD(final String tableName) { - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a")); htd.addFamily(a); HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java index 6540d4a..a45f767 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java @@ -46,9 +46,9 @@ public class TestColumnPrefixFilter { @Test public void testColumnPrefixFilter() throws IOException { String family = "Family"; - HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestColumnPrefixFilter")); htd.addFamily((new HColumnDescriptor(family)).setMaxVersions(3)); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); HRegion region = HRegion.createHRegion(info, TEST_UTIL. getDataTestDir(), TEST_UTIL.getConfiguration(), htd); try { @@ -108,9 +108,9 @@ public class TestColumnPrefixFilter { @Test public void testColumnPrefixFilterWithFilterList() throws IOException { String family = "Family"; - HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestColumnPrefixFilter")); htd.addFamily((new HColumnDescriptor(family)).setMaxVersions(3)); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); HRegion region = HRegion.createHRegion(info, TEST_UTIL. 
getDataTestDir(), TEST_UTIL.getConfiguration(), htd); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java index 277cff0..3662d92 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java @@ -71,14 +71,14 @@ public class TestDependentColumnFilter { public void setUp() throws Exception { testVals = makeTestVals(); - HTableDescriptor htd = new HTableDescriptor(this.getClass().getName()); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(this.getClass().getSimpleName())); HColumnDescriptor hcd0 = new HColumnDescriptor(FAMILIES[0]); hcd0.setMaxVersions(3); htd.addFamily(hcd0); HColumnDescriptor hcd1 = new HColumnDescriptor(FAMILIES[1]); hcd1.setMaxVersions(3); htd.addFamily(hcd1); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); this.region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd); addData(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java index 1d6c234..742d4e4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; @@ -129,7 +130,7 @@ public class TestFilter { @Before public void setUp() throws Exception { - HTableDescriptor htd = new HTableDescriptor("TestFilter"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestFilter")); htd.addFamily(new HColumnDescriptor(FAMILIES[0])); htd.addFamily(new HColumnDescriptor(FAMILIES[1])); htd.addFamily(new HColumnDescriptor(FAMILIES_1[0])); @@ -137,7 +138,7 @@ public class TestFilter { htd.addFamily(new HColumnDescriptor(NEW_FAMILIES[0])); htd.addFamily(new HColumnDescriptor(NEW_FAMILIES[1])); htd.addFamily(new HColumnDescriptor(FAMILIES_1[1])); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); this.region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd); @@ -1770,9 +1771,9 @@ public class TestFilter { public void testNestedFilterListWithSCVF() throws IOException { byte[] columnStatus = Bytes.toBytes("S"); - HTableDescriptor htd = new HTableDescriptor("testNestedFilterListWithSCVF"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testNestedFilterListWithSCVF")); htd.addFamily(new HColumnDescriptor(FAMILIES[0])); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); HRegion testRegion = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd); for(int i=0; i<10; i++) { diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java index f959142..00cc398 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWithScanLimits.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.ipc.RpcClient; @@ -135,7 +136,7 @@ public class TestFilterWithScanLimits { assertNotNull("HBaseAdmin is not initialized successfully.", admin); if (admin != null) { - HTableDescriptor desc = new HTableDescriptor(name); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name)); HColumnDescriptor coldef = new HColumnDescriptor(Bytes.toBytes("f1")); desc.addFamily(coldef); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java index 6f4e9ec..9a04db9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterWrapper.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -140,7 +141,7 @@ public class TestFilterWrapper { assertNotNull("HBaseAdmin is not initialized successfully.", admin); if (admin != null) { - HTableDescriptor desc = new HTableDescriptor(name); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name)); HColumnDescriptor coldef = new HColumnDescriptor(Bytes.toBytes("f1")); desc.addFamily(coldef); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java index d360da6..5f068c2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java @@ -46,12 +46,12 @@ public class TestMultipleColumnPrefixFilter { @Test public void testMultipleColumnPrefixFilter() throws IOException { String family = "Family"; - HTableDescriptor htd = new HTableDescriptor("TestMultipleColumnPrefixFilter"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestMultipleColumnPrefixFilter")); HColumnDescriptor hcd = new HColumnDescriptor(family); hcd.setMaxVersions(3); htd.addFamily(hcd); // HRegionInfo info = new HRegionInfo(htd, null, null, false); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); HRegion region = HRegion.createHRegion(info, TEST_UTIL. 
getDataTestDir(), TEST_UTIL.getConfiguration(), htd); @@ -110,14 +110,14 @@ public class TestMultipleColumnPrefixFilter { public void testMultipleColumnPrefixFilterWithManyFamilies() throws IOException { String family1 = "Family1"; String family2 = "Family2"; - HTableDescriptor htd = new HTableDescriptor("TestMultipleColumnPrefixFilter"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestMultipleColumnPrefixFilter")); HColumnDescriptor hcd1 = new HColumnDescriptor(family1); hcd1.setMaxVersions(3); htd.addFamily(hcd1); HColumnDescriptor hcd2 = new HColumnDescriptor(family2); hcd2.setMaxVersions(3); htd.addFamily(hcd2); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); HRegion region = HRegion.createHRegion(info, TEST_UTIL. getDataTestDir(), TEST_UTIL.getConfiguration(), htd); @@ -181,9 +181,9 @@ public class TestMultipleColumnPrefixFilter { @Test public void testMultipleColumnPrefixFilterWithColumnPrefixFilter() throws IOException { String family = "Family"; - HTableDescriptor htd = new HTableDescriptor("TestMultipleColumnPrefixFilter"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestMultipleColumnPrefixFilter")); htd.addFamily(new HColumnDescriptor(family)); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); HRegion region = HRegion.createHRegion(info, TEST_UTIL. getDataTestDir(), TEST_UTIL.getConfiguration(),htd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java new file mode 100644 index 0000000..7f25005 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java @@ -0,0 +1,150 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.io; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.HFileArchiveUtil; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +/** + * Test that FileLink switches between alternate locations + * when the current location moves or gets deleted. + */ +@Category(SmallTests.class) +public class TestHFileLink { + + @Test + public void testValidLinkNames() { + String validLinkNames[] = {"foo=fefefe-0123456", "ns=foo=abababa-fefefefe"}; + + for(String name : validLinkNames) { + Assert.assertTrue("Failed validating:" + name, name.matches(HFileLink.LINK_NAME_REGEX)); + } + + for(String name : validLinkNames) { + Assert.assertTrue("Failed validating:" + name, HFileLink.isHFileLink(name)); + } + + String testName = "foo=fefefe-0123456"; + Assert.assertEquals(TableName.valueOf("foo"), + HFileLink.getReferencedTableName(testName)); + Assert.assertEquals("fefefe", HFileLink.getReferencedRegionName(testName)); + Assert.assertEquals("0123456", HFileLink.getReferencedHFileName(testName)); + Assert.assertEquals(testName, + HFileLink.createHFileLinkName(TableName.valueOf("foo"), "fefefe", "0123456")); + + testName = "ns=foo=fefefe-0123456"; + Assert.assertEquals(TableName.valueOf("ns", "foo"), + HFileLink.getReferencedTableName(testName)); + Assert.assertEquals("fefefe", HFileLink.getReferencedRegionName(testName)); + Assert.assertEquals("0123456", HFileLink.getReferencedHFileName(testName)); + Assert.assertEquals(testName, + HFileLink.createHFileLinkName(TableName.valueOf("ns", "foo"), "fefefe", "0123456")); + + for(String name : validLinkNames) { + Matcher m = HFileLink.LINK_NAME_PATTERN.matcher(name); + assertTrue(m.matches()); + Assert.assertEquals(HFileLink.getReferencedTableName(name), + TableName.valueOf(m.group(1), m.group(2))); + Assert.assertEquals(HFileLink.getReferencedRegionName(name), + m.group(3)); + Assert.assertEquals(HFileLink.getReferencedHFileName(name), + m.group(4)); + } + } + + @Test + public void testBackReference() { + Path rootDir = new Path("/root"); + Path archiveDir = new Path(rootDir, ".archive"); + String storeFileName = "121212"; + String linkDir = FileLink.BACK_REFERENCES_DIRECTORY_PREFIX + storeFileName; + String encodedRegion = "FEFE"; + String cf = "cf1"; + + TableName refTables[] = {TableName.valueOf("refTable"), + TableName.valueOf("ns", "refTable")}; + + for(TableName refTable : refTables) { + Path refTableDir = FSUtils.getTableDir(archiveDir, refTable); + Path refRegionDir = 
HRegion.getRegionDir(refTableDir, encodedRegion); + Path refDir = new Path(refRegionDir, cf); + Path refLinkDir = new Path(refDir, linkDir); + String refStoreFileName = refTable.getNameAsString().replace( + TableName.NAMESPACE_DELIM, '=') + "=" + encodedRegion + "-" + storeFileName; + + TableName tableNames[] = {TableName.valueOf("tableName1"), + TableName.valueOf("ns", "tableName2")}; + + for( TableName tableName : tableNames) { + Path tableDir = FSUtils.getTableDir(rootDir, tableName); + Path regionDir = HRegion.getRegionDir(tableDir, encodedRegion); + Path cfDir = new Path(regionDir, cf); + + //Verify back reference creation + assertEquals(encodedRegion+"."+ + tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '='), + HFileLink.createBackReferenceName(tableName.getNameAsString(), + encodedRegion)); + + //verify parsing back reference + Pair parsedRef = + HFileLink.parseBackReferenceName(encodedRegion+"."+ + tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '=')); + assertEquals(parsedRef.getFirst(), tableName); + assertEquals(parsedRef.getSecond(), encodedRegion); + + //verify resolving back reference + Path storeFileDir = new Path(refLinkDir, encodedRegion+"."+ + tableName.getNameAsString().replace(TableName.NAMESPACE_DELIM, '=')); + Path linkPath = new Path(cfDir, refStoreFileName); + assertEquals(linkPath, HFileLink.getHFileFromBackReference(rootDir, storeFileDir)); + } + } + } + + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java index bdd0056..e899b74 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.LargeTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; @@ -90,7 +91,7 @@ public class TestChangingEncoding { private void prepareTest(String testId) throws IOException { tableName = "test_table_" + testId; - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); hcd = new HColumnDescriptor(CF); htd.addFamily(hcd); admin.createTable(htd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java index fea07a4..4128935 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java @@ -92,7 +92,7 @@ public class TestLoadAndSwitchEncodeOnDisk extends assertAllOnLine(t); System.err.println("\nCompacting the table\n"); - admin.majorCompact(TABLE); + admin.majorCompact(TABLE.getName()); // Wait until compaction completes Threads.sleepWithoutInterrupt(5000); HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java index 7cd62ac..6f85a4e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java @@ -27,6 +27,7 @@ import java.util.Map; import java.util.Set; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -53,7 +54,7 @@ import org.junit.runners.Parameterized.Parameters; @Category(SmallTests.class) public class TestScannerSelectionUsingKeyRange { private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static String TABLE = "myTable"; + private static TableName TABLE = TableName.valueOf("myTable"); private static String FAMILY = "myCF"; private static byte[] FAMILY_BYTES = Bytes.toBytes(FAMILY); private static final int NUM_ROWS = 8; @@ -96,7 +97,7 @@ public class TestScannerSelectionUsingKeyRange { .setBloomFilterType(bloomType); HTableDescriptor htd = new HTableDescriptor(TABLE); htd.addFamily(hcd); - HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE)); + HRegionInfo info = new HRegionInfo(TABLE); HRegion region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), conf, htd); for (int iFile = 0; iFile < NUM_FILES; ++iFile) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java index 0d3c062..b31047c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java @@ -22,12 +22,12 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.Map; import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.InternalScanner; @@ -62,7 +61,7 @@ public class TestScannerSelectionUsingTTL { private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static String TABLE = "myTable"; + private static TableName TABLE = TableName.valueOf("myTable"); private static String FAMILY = "myCF"; private static byte[] FAMILY_BYTES = Bytes.toBytes(FAMILY); @@ -106,7 +105,7 @@ public class TestScannerSelectionUsingTTL { .setTimeToLive(TTL_SECONDS); HTableDescriptor htd = new HTableDescriptor(TABLE); htd.addFamily(hcd); - HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE)); + HRegionInfo info = new HRegionInfo(TABLE); HRegion region = HRegion.createHRegion(info, 
TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, htd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java index 71811a3..fd8fec1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java @@ -30,6 +30,7 @@ import java.util.Arrays; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; import java.util.Random; import java.util.Set; import java.util.concurrent.Callable; @@ -43,6 +44,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -96,7 +98,8 @@ public class TestHFileOutputFormat { private static final byte[][] FAMILIES = { Bytes.add(PerformanceEvaluation.FAMILY_NAME, Bytes.toBytes("-A")) , Bytes.add(PerformanceEvaluation.FAMILY_NAME, Bytes.toBytes("-B"))}; - private static final byte[] TABLE_NAME = Bytes.toBytes("TestTable"); + private static final TableName TABLE_NAME = + TableName.valueOf("TestTable"); private HBaseTestingUtility util = new HBaseTestingUtility(); @@ -440,7 +443,7 @@ public class TestHFileOutputFormat { LOG.info("Waiting for table to disable"); } admin.enableTable(TABLE_NAME); - util.waitTableAvailable(TABLE_NAME); + util.waitTableAvailable(TABLE_NAME.getName()); assertEquals("Data should remain after reopening of regions", tableDigestBefore, util.checksumRows(table)); } finally { @@ -664,7 +667,7 @@ public class TestHFileOutputFormat { // deep inspection: get the StoreFile dir final Path storePath = HStore.getStoreHomedir( - HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME), + FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME), admin.getTableRegions(TABLE_NAME).get(0), FAMILIES[0]); assertEquals(0, fs.listStatus(storePath).length); @@ -690,7 +693,7 @@ public class TestHFileOutputFormat { assertEquals(2, fs.listStatus(storePath).length); // minor compactions shouldn't get rid of the file - admin.compact(TABLE_NAME); + admin.compact(TABLE_NAME.getName()); try { quickPoll(new Callable() { public Boolean call() throws Exception { @@ -703,7 +706,7 @@ public class TestHFileOutputFormat { } // a major compaction should work though - admin.majorCompact(TABLE_NAME); + admin.majorCompact(TABLE_NAME.getName()); quickPoll(new Callable() { public Boolean call() throws Exception { return fs.listStatus(storePath).length == 1; @@ -732,7 +735,7 @@ public class TestHFileOutputFormat { // deep inspection: get the StoreFile dir final Path storePath = HStore.getStoreHomedir( - HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME), + FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME), admin.getTableRegions(TABLE_NAME).get(0), FAMILIES[0]); assertEquals(0, fs.listStatus(storePath).length); @@ -741,7 +744,7 @@ public class TestHFileOutputFormat { Put p = new Put(Bytes.toBytes("test")); p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1")); table.put(p); - admin.flush(TABLE_NAME); + admin.flush(TABLE_NAME.getName()); assertEquals(1, util.countRows(table)); quickPoll(new Callable() { public Boolean call() 
throws Exception { @@ -767,7 +770,7 @@ public class TestHFileOutputFormat { assertEquals(2, fs.listStatus(storePath).length); // minor compactions shouldn't get rid of the file - admin.compact(TABLE_NAME); + admin.compact(TABLE_NAME.getName()); try { quickPoll(new Callable() { public Boolean call() throws Exception { @@ -780,7 +783,7 @@ public class TestHFileOutputFormat { } // a major compaction should work though - admin.majorCompact(TABLE_NAME); + admin.majorCompact(TABLE_NAME.getName()); quickPoll(new Callable() { public Boolean call() throws Exception { return fs.listStatus(storePath).length == 1; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java index 485b22c..1b3fec2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHLogRecordReader.java @@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -40,7 +41,6 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.InputSplit; -import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.MapReduceTestUtil; import org.junit.AfterClass; import org.junit.Before; @@ -57,8 +57,9 @@ public class TestHLogRecordReader { private static Configuration conf; private static FileSystem fs; private static Path hbaseDir; - private static final byte [] tableName = Bytes.toBytes(getName()); - private static final byte [] rowName = tableName; + private static final TableName tableName = + TableName.valueOf(getName()); + private static final byte [] rowName = tableName.getName(); private static final HRegionInfo info = new HRegionInfo(tableName, Bytes.toBytes(""), Bytes.toBytes(""), false); private static final byte [] family = Bytes.toBytes("column"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 3143928..fef266f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HTable; @@ -192,7 +193,7 @@ public class TestImportExport { */ @Test public void testMetaExport() throws Exception { - String EXPORT_TABLE = ".META."; + String EXPORT_TABLE = TableName.META_TABLE_NAME.getNameAsString(); String[] args = new String[] { EXPORT_TABLE, FQ_OUTPUT_DIR, "1", "0", "0" }; assertTrue(runExport(args)); } @@ -203,7 +204,7 @@ public class TestImportExport { @Test public void testExportScannerBatching() throws Exception { 
String BATCH_TABLE = "exportWithBatch"; - HTableDescriptor desc = new HTableDescriptor(BATCH_TABLE); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(BATCH_TABLE)); desc.addFamily(new HColumnDescriptor(FAMILYA) .setMaxVersions(1) ); @@ -233,7 +234,7 @@ public class TestImportExport { @Test public void testWithDeletes() throws Exception { String EXPORT_TABLE = "exportWithDeletes"; - HTableDescriptor desc = new HTableDescriptor(EXPORT_TABLE); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(EXPORT_TABLE)); desc.addFamily(new HColumnDescriptor(FAMILYA) .setMaxVersions(5) .setKeepDeletedCells(true) @@ -264,7 +265,7 @@ public class TestImportExport { assertTrue(runExport(args)); String IMPORT_TABLE = "importWithDeletes"; - desc = new HTableDescriptor(IMPORT_TABLE); + desc = new HTableDescriptor(TableName.valueOf(IMPORT_TABLE)); desc.addFamily(new HColumnDescriptor(FAMILYA) .setMaxVersions(5) .setKeepDeletedCells(true) @@ -302,7 +303,7 @@ public class TestImportExport { public void testWithFilter() throws Exception { // Create simple table to export String EXPORT_TABLE = "exportSimpleCase_ImportWithFilter"; - HTableDescriptor desc = new HTableDescriptor(EXPORT_TABLE); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(EXPORT_TABLE)); desc.addFamily(new HColumnDescriptor(FAMILYA).setMaxVersions(5)); UTIL.getHBaseAdmin().createTable(desc); HTable exportTable = new HTable(UTIL.getConfiguration(), EXPORT_TABLE); @@ -321,7 +322,7 @@ public class TestImportExport { // Import to a new table String IMPORT_TABLE = "importWithFilter"; - desc = new HTableDescriptor(IMPORT_TABLE); + desc = new HTableDescriptor(TableName.valueOf(IMPORT_TABLE)); desc.addFamily(new HColumnDescriptor(FAMILYA).setMaxVersions(5)); UTIL.getHBaseAdmin().createTable(desc); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java index 211dad9..cbdfe71 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java @@ -143,7 +143,7 @@ public class TestLoadIncrementalHFiles { final byte[] TABLE = Bytes.toBytes("mytable_"+testName); - HTableDescriptor htd = new HTableDescriptor(TABLE); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE)); HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY); familyDesc.setBloomFilterType(bloomType); htd.addFamily(familyDesc); @@ -183,7 +183,7 @@ public class TestLoadIncrementalHFiles { final byte[] TABLE = Bytes.toBytes("mytable_"+testName); HBaseAdmin admin = new HBaseAdmin(util.getConfiguration()); - HTableDescriptor htd = new HTableDescriptor(TABLE); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE)); // set real family name to upper case in purpose to simulate the case that // family name in HFiles is invalid HColumnDescriptor family = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java index fe4038f..162ca04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java @@ -35,6 +35,7 @@ import 
org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -117,7 +118,7 @@ public class TestLoadIncrementalHFilesSplitRecovery { private void setupTable(String table, int cfs) throws IOException { try { LOG.info("Creating table " + table); - HTableDescriptor htd = new HTableDescriptor(table); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table)); for (int i = 0; i < 10; i++) { htd.addFamily(new HColumnDescriptor(family(i))); } @@ -157,7 +158,7 @@ public class TestLoadIncrementalHFilesSplitRecovery { .toBytes(table)); for (HRegionInfo hri : ProtobufUtil.getOnlineRegions(hrs)) { - if (Bytes.equals(hri.getTableName(), Bytes.toBytes(table))) { + if (Bytes.equals(hri.getTableName().getName(), Bytes.toBytes(table))) { // splitRegion doesn't work if startkey/endkey are null ProtobufUtil.split(hrs, hri, rowkey(ROWCOUNT / 2)); // hard code split } @@ -168,7 +169,7 @@ public class TestLoadIncrementalHFilesSplitRecovery { do { regions = 0; for (HRegionInfo hri : ProtobufUtil.getOnlineRegions(hrs)) { - if (Bytes.equals(hri.getTableName(), Bytes.toBytes(table))) { + if (Bytes.equals(hri.getTableName().getName(), Bytes.toBytes(table))) { regions++; } } @@ -239,7 +240,7 @@ public class TestLoadIncrementalHFilesSplitRecovery { util.getConfiguration(), useSecure) { protected List tryAtomicRegionLoad(final HConnection conn, - byte[] tableName, final byte[] first, Collection lqis) + TableName tableName, final byte[] first, Collection lqis) throws IOException { int i = attmptedCalls.incrementAndGet(); if (i == 1) { @@ -274,10 +275,10 @@ public class TestLoadIncrementalHFilesSplitRecovery { // Make it so we return a particular location when asked. final HRegionLocation loc = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, new ServerName("example.org", 1234, 0)); - Mockito.when(c.getRegionLocation((byte[]) Mockito.any(), + Mockito.when(c.getRegionLocation((TableName) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean())). thenReturn(loc); - Mockito.when(c.locateRegion((byte[]) Mockito.any(), (byte[]) Mockito.any())). + Mockito.when(c.locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any())). 
thenReturn(loc); ClientProtos.ClientService.BlockingInterface hri = Mockito.mock(ClientProtos.ClientService.BlockingInterface.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java index 0c03429..5811eb9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.mapreduce; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.SmallTests; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -30,8 +31,12 @@ import static org.junit.Assert.assertTrue; public class TestTableSplit { @Test public void testHashCode() { - TableSplit split1 = new TableSplit("table".getBytes(), "row-start".getBytes(), "row-end".getBytes(), "location"); - TableSplit split2 = new TableSplit("table".getBytes(), "row-start".getBytes(), "row-end".getBytes(), "location"); + TableSplit split1 = new TableSplit(TableName.valueOf("table"), + "row-start".getBytes(), + "row-end".getBytes(), "location"); + TableSplit split2 = new TableSplit(TableName.valueOf("table"), + "row-start".getBytes(), + "row-end".getBytes(), "location"); assertEquals (split1, split2); assertTrue (split1.hashCode() == split2.hashCode()); HashSet set = new HashSet(2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java index 2f70fe4..ca6caf8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java @@ -142,12 +142,12 @@ public class TestTimeRangeMapRed { @Test public void testTimeRangeMapRed() throws IOException, InterruptedException, ClassNotFoundException { - final HTableDescriptor desc = new HTableDescriptor(TABLE_NAME); + final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLE_NAME)); final HColumnDescriptor col = new HColumnDescriptor(FAMILY_NAME); col.setMaxVersions(Integer.MAX_VALUE); desc.addFamily(col); admin.createTable(desc); - HTable table = new HTable(UTIL.getConfiguration(), desc.getName()); + HTable table = new HTable(UTIL.getConfiguration(), desc.getTableName()); prepareTest(table); runTestOnTable(); verify(table); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index f933dff..cfd407c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -28,6 +28,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -134,7 +135,7 @@ public class TestWALPlayer { configuration.set(WALPlayer.TABLES_KEY, "table"); HLogKeyValueMapper mapper = new HLogKeyValueMapper(); HLogKey key = mock(HLogKey.class); - when(key.getTablename()).thenReturn(Bytes.toBytes("table")); + when(key.getTablename()).thenReturn(TableName.valueOf("table")); 
@SuppressWarnings("unchecked") Mapper.Context context = mock(Context.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index ebc5ead..8c9e0ba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; @@ -512,7 +513,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } @Override - public List getOnlineRegions(byte[] tableName) throws IOException { + public List getOnlineRegions(TableName tableName) throws IOException { // TODO Auto-generated method stub return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java index 4cf42d5..388db2c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java @@ -32,6 +32,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -68,7 +69,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper; import org.apache.hadoop.hbase.zookeeper.ZKAssign; @@ -103,7 +103,7 @@ public class TestAssignmentManager { private static final ServerName SERVERNAME_B = new ServerName("example.org", 0, 5678); private static final HRegionInfo REGIONINFO = - new HRegionInfo(Bytes.toBytes("t"), + new HRegionInfo(TableName.valueOf("t"), HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW); private static int assignmentCount; private static boolean enabling = false; @@ -500,7 +500,7 @@ public class TestAssignmentManager { // adding region in pending close. am.getRegionStates().updateRegionState( REGIONINFO, State.SPLITTING, SERVERNAME_A); - am.getZKTable().setEnabledTable(REGIONINFO.getTableNameAsString()); + am.getZKTable().setEnabledTable(REGIONINFO.getTableName()); RegionTransition data = RegionTransition.createRegionTransition(EventType.RS_ZK_REGION_SPLITTING, REGIONINFO.getRegionName(), SERVERNAME_A); String node = ZKAssign.getNodeName(this.watcher, REGIONINFO.getEncodedName()); @@ -549,9 +549,9 @@ public class TestAssignmentManager { // adding region in pending close. 
am.getRegionStates().updateRegionState(REGIONINFO, State.PENDING_CLOSE); if (state == Table.State.DISABLING) { - am.getZKTable().setDisablingTable(REGIONINFO.getTableNameAsString()); + am.getZKTable().setDisablingTable(REGIONINFO.getTableName()); } else { - am.getZKTable().setDisabledTable(REGIONINFO.getTableNameAsString()); + am.getZKTable().setDisabledTable(REGIONINFO.getTableName()); } RegionTransition data = RegionTransition.createRegionTransition(EventType.M_ZK_REGION_CLOSING, REGIONINFO.getRegionName(), SERVERNAME_A); @@ -575,7 +575,7 @@ public class TestAssignmentManager { am.getRegionStates().isRegionsInTransition()); } } finally { - am.setEnabledTable(REGIONINFO.getTableNameAsString()); + am.setEnabledTable(REGIONINFO.getTableName()); executor.shutdown(); am.shutdown(); // Clean up all znodes @@ -889,7 +889,7 @@ public class TestAssignmentManager { } try{ // set table in disabling state. - am.getZKTable().setDisablingTable(REGIONINFO.getTableNameAsString()); + am.getZKTable().setDisablingTable(REGIONINFO.getTableName()); am.joinCluster(); // should not call retainAssignment if we get empty regions in assignAllUserRegions. assertFalse( @@ -897,12 +897,12 @@ public class TestAssignmentManager { gate.get()); // need to change table state from disabling to disabled. assertTrue("Table should be disabled.", - am.getZKTable().isDisabledTable(REGIONINFO.getTableNameAsString())); + am.getZKTable().isDisabledTable(REGIONINFO.getTableName())); } finally { this.server.getConfiguration().setClass( HConstants.HBASE_MASTER_LOADBALANCER_CLASS, DefaultLoadBalancer.class, LoadBalancer.class); - am.getZKTable().setEnabledTable(REGIONINFO.getTableNameAsString()); + am.getZKTable().setEnabledTable(REGIONINFO.getTableName()); am.shutdown(); } } @@ -928,17 +928,17 @@ public class TestAssignmentManager { this.serverManager); try { // set table in enabling state. 
- am.getZKTable().setEnablingTable(REGIONINFO.getTableNameAsString()); - new EnableTableHandler(server, REGIONINFO.getTableName(), am.getCatalogTracker(), - am, new NullTableLockManager(), true).prepare() + am.getZKTable().setEnablingTable(REGIONINFO.getTableName()); + new EnableTableHandler(server, REGIONINFO.getTableName(), + am.getCatalogTracker(), am, new NullTableLockManager(), true).prepare() .process(); assertEquals("Number of assignments should be 1.", 1, assignmentCount); assertTrue("Table should be enabled.", - am.getZKTable().isEnabledTable(REGIONINFO.getTableNameAsString())); + am.getZKTable().isEnabledTable(REGIONINFO.getTableName())); } finally { enabling = false; assignmentCount = 0; - am.getZKTable().setEnabledTable(REGIONINFO.getTableNameAsString()); + am.getZKTable().setEnabledTable(REGIONINFO.getTableName()); am.shutdown(); ZKAssign.deleteAllNodes(this.watcher); } @@ -965,7 +965,7 @@ public class TestAssignmentManager { // adding region plan am.regionPlans.put(REGIONINFO.getEncodedName(), new RegionPlan(REGIONINFO, SERVERNAME_B, SERVERNAME_A)); - am.getZKTable().setEnabledTable(REGIONINFO.getTableNameAsString()); + am.getZKTable().setEnabledTable(REGIONINFO.getTableName()); try { am.assignInvoked = false; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index 476191c..07d3dad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -31,6 +31,7 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -94,13 +95,13 @@ public class TestAssignmentManagerOnCluster { public void testAssignRegion() throws Exception { String table = "testAssignRegion"; try { - HTableDescriptor desc = new HTableDescriptor(table); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table)); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); - HTable meta = new HTable(conf, HConstants.META_TABLE_NAME); + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); HRegionInfo hri = new HRegionInfo( - desc.getName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); + desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); MetaEditor.addRegionToMeta(meta, hri); HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); @@ -124,13 +125,13 @@ public class TestAssignmentManagerOnCluster { ServerName deadServer = null; HMaster master = null; try { - HTableDescriptor desc = new HTableDescriptor(table); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table)); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); - HTable meta = new HTable(conf, HConstants.META_TABLE_NAME); + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); HRegionInfo hri = new HRegionInfo( - desc.getName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); + desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); MetaEditor.addRegionToMeta(meta, hri); master = TEST_UTIL.getHBaseCluster().getMaster(); @@ -178,7 +179,8 @@ public class TestAssignmentManagerOnCluster { */ @Test (timeout=60000) 
public void testOfflineRegion() throws Exception { - String table = "testOfflineRegion"; + TableName table = + TableName.valueOf("testOfflineRegion"); try { HRegionInfo hri = createTableAndGetOneRegion(table); @@ -191,7 +193,7 @@ public class TestAssignmentManagerOnCluster { long timeoutTime = System.currentTimeMillis() + 800; while (true) { List regions = - regionStates.getRegionsOfTable(Bytes.toBytes(table)); + regionStates.getRegionsOfTable(table); if (!regions.contains(hri)) break; long now = System.currentTimeMillis(); if (now > timeoutTime) { @@ -203,7 +205,7 @@ public class TestAssignmentManagerOnCluster { RegionState regionState = regionStates.getRegionState(hri); assertTrue(regionState.isOffline()); } finally { - TEST_UTIL.deleteTable(Bytes.toBytes(table)); + TEST_UTIL.deleteTable(table); } } @@ -212,7 +214,8 @@ public class TestAssignmentManagerOnCluster { */ @Test (timeout=50000) public void testMoveRegion() throws Exception { - String table = "testMoveRegion"; + TableName table = + TableName.valueOf("testMoveRegion"); try { HRegionInfo hri = createTableAndGetOneRegion(table); @@ -248,12 +251,12 @@ public class TestAssignmentManagerOnCluster { } } finally { - TEST_UTIL.deleteTable(Bytes.toBytes(table)); + TEST_UTIL.deleteTable(table); } } HRegionInfo createTableAndGetOneRegion( - final String tableName) throws IOException, InterruptedException { + final TableName tableName) throws IOException, InterruptedException { HTableDescriptor desc = new HTableDescriptor(tableName); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5); @@ -263,7 +266,7 @@ public class TestAssignmentManagerOnCluster { long timeoutTime = System.currentTimeMillis() + 1000; while (true) { List regions = master.getAssignmentManager(). 
- getRegionStates().getRegionsOfTable(Bytes.toBytes(tableName)); + getRegionStates().getRegionsOfTable(tableName); if (regions.size() > 3) { return regions.get(2); } @@ -286,13 +289,13 @@ public class TestAssignmentManagerOnCluster { public void testForceAssignWhileClosing() throws Exception { String table = "testForceAssignWhileClosing"; try { - HTableDescriptor desc = new HTableDescriptor(table); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table)); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); - HTable meta = new HTable(conf, HConstants.META_TABLE_NAME); + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); HRegionInfo hri = new HRegionInfo( - desc.getName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); + desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); MetaEditor.addRegionToMeta(meta, hri); HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); @@ -332,13 +335,13 @@ public class TestAssignmentManagerOnCluster { public void testCloseFailed() throws Exception { String table = "testCloseFailed"; try { - HTableDescriptor desc = new HTableDescriptor(table); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table)); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); - HTable meta = new HTable(conf, HConstants.META_TABLE_NAME); + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); HRegionInfo hri = new HRegionInfo( - desc.getName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); + desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); MetaEditor.addRegionToMeta(meta, hri); HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); @@ -376,13 +379,13 @@ public class TestAssignmentManagerOnCluster { public void testOpenFailed() throws Exception { String table = "testOpenFailed"; try { - HTableDescriptor desc = new HTableDescriptor(table); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(table)); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); - HTable meta = new HTable(conf, HConstants.META_TABLE_NAME); + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); HRegionInfo hri = new HRegionInfo( - desc.getName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); + desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); MetaEditor.addRegionToMeta(meta, hri); MockLoadBalancer.controledRegion = hri.getEncodedName(); @@ -415,19 +418,20 @@ public class TestAssignmentManagerOnCluster { */ @Test (timeout=60000) public void testOpenFailedUnrecoverable() throws Exception { - String table = "testOpenFailedUnrecoverable"; + TableName table = + TableName.valueOf("testOpenFailedUnrecoverable"); try { HTableDescriptor desc = new HTableDescriptor(table); desc.addFamily(new HColumnDescriptor(FAMILY)); admin.createTable(desc); - HTable meta = new HTable(conf, HConstants.META_TABLE_NAME); + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); HRegionInfo hri = new HRegionInfo( - desc.getName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); + desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z")); MetaEditor.addRegionToMeta(meta, hri); FileSystem fs = FileSystem.get(conf); - Path tableDir= FSUtils.getTablePath(FSUtils.getRootDir(conf), table); + Path tableDir= FSUtils.getTableDir(FSUtils.getRootDir(conf), table); Path regionDir = new Path(tableDir, hri.getEncodedName()); // create a file named the same as the region dir to // mess up with region opening @@ -454,13 +458,15 @@ public class TestAssignmentManagerOnCluster { 
getRegionStates().getRegionServerOfRegion(hri); TEST_UTIL.assertRegionOnServer(hri, serverName, 200); } finally { - TEST_UTIL.deleteTable(Bytes.toBytes(table)); + TEST_UTIL.deleteTable(table); } } @Test (timeout=60000) public void testSSHWhenDisablingTableRegionsInOpeningOrPendingOpenState() throws Exception { - final String table = "testSSHWhenDisablingTableRegionsInOpeningOrPendingOpenState"; + final TableName table = + TableName.valueOf + ("testSSHWhenDisablingTableRegionsInOpeningOrPendingOpenState"); AssignmentManager am = TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager(); HRegionInfo hri = null; ServerName serverName = null; @@ -491,7 +497,7 @@ public class TestAssignmentManagerOnCluster { am.regionOnline(hri, serverName); } am.getZKTable().setDisabledTable(table); - TEST_UTIL.deleteTable(Bytes.toBytes(table)); + TEST_UTIL.deleteTable(table); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 00846e1..7af154c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -26,6 +26,7 @@ import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.spy; import java.io.IOException; +import java.util.List; import java.util.Map; import java.util.SortedMap; import java.util.TreeMap; @@ -37,11 +38,13 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; @@ -180,7 +183,7 @@ public class TestCatalogJanitor { } @Override - public void checkTableModifiable(byte[] tableName) throws IOException { + public void checkTableModifiable(TableName tableName) throws IOException { //no-op } @@ -261,7 +264,7 @@ public class TestCatalogJanitor { public TableDescriptors getTableDescriptors() { return new TableDescriptors() { @Override - public HTableDescriptor remove(String tablename) throws IOException { + public HTableDescriptor remove(TableName tablename) throws IOException { // TODO Auto-generated method stub return null; } @@ -273,15 +276,14 @@ public class TestCatalogJanitor { } @Override - public HTableDescriptor get(byte[] tablename) + public HTableDescriptor get(TableName tablename) throws IOException { - return get(Bytes.toString(tablename)); + return createHTableDescriptor(); } @Override - public HTableDescriptor get(String tablename) - throws IOException { - return createHTableDescriptor(); + public Map getByNamespace(String name) throws IOException { + return null; } @Override @@ -303,25 +305,59 @@ public class TestCatalogJanitor { } @Override - public void deleteTable(byte[] tableName) throws IOException { } + public void createNamespace(NamespaceDescriptor descriptor) throws IOException { + //To change body of implemented methods use File | Settings | File Templates. 
+ } + + @Override + public void modifyNamespace(NamespaceDescriptor descriptor) throws IOException { + //To change body of implemented methods use File | Settings | File Templates. + } @Override - public void modifyTable(byte[] tableName, HTableDescriptor descriptor) throws IOException { } + public void deleteNamespace(String name) throws IOException { + //To change body of implemented methods use File | Settings | File Templates. + } @Override - public void enableTable(byte[] tableName) throws IOException { } + public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException { + return null; //To change body of implemented methods use File | Settings | File Templates. + } @Override - public void disableTable(byte[] tableName) throws IOException { } + public List listNamespaceDescriptors() throws IOException { + return null; //To change body of implemented methods use File | Settings | File Templates. + } @Override - public void addColumn(byte[] tableName, HColumnDescriptor column) throws IOException { } + public List getTableDescriptorsByNamespace(String name) throws IOException { + return null; //To change body of implemented methods use File | Settings | File Templates. + } @Override - public void modifyColumn(byte[] tableName, HColumnDescriptor descriptor) throws IOException { } + public void deleteTable(TableName tableName) throws IOException { } @Override - public void deleteColumn(byte[] tableName, byte[] columnName) throws IOException { } + public void modifyTable(TableName tableName, HTableDescriptor descriptor) + throws IOException { } + + @Override + public void enableTable(TableName tableName) throws IOException { } + + @Override + public void disableTable(TableName tableName) throws IOException { } + + @Override + public void addColumn(TableName tableName, HColumnDescriptor column) + throws IOException { } + + @Override + public void modifyColumn(TableName tableName, HColumnDescriptor descriptor) + throws IOException { } + + @Override + public void deleteColumn(TableName tableName, byte[] columnName) + throws IOException { } @Override public TableLockManager getTableLockManager() { @@ -349,16 +385,16 @@ public class TestCatalogJanitor { MasterServices services = new MockMasterServices(server); CatalogJanitor janitor = new CatalogJanitor(server, services); // Create regions. - HTableDescriptor htd = new HTableDescriptor("table"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table")); htd.addFamily(new HColumnDescriptor("f")); HRegionInfo parent = - new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), + new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee")); HRegionInfo splita = - new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), + new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); HRegionInfo splitb = - new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), + new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee")); // Test that when both daughter regions are in place, that we do not // remove the parent. @@ -366,7 +402,7 @@ public class TestCatalogJanitor { // Add a reference under splitA directory so we don't clear out the parent. 
Path rootdir = services.getMasterFileSystem().getRootDir(); Path tabledir = - HTableDescriptor.getTableDir(rootdir, htd.getName()); + FSUtils.getTableDir(rootdir, htd.getTableName()); Path storedir = HStore.getStoreHomedir(tabledir, splita, htd.getColumnFamilies()[0].getName()); Reference ref = Reference.createTopReference(Bytes.toBytes("ccc")); @@ -430,30 +466,30 @@ public class TestCatalogJanitor { // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc. // Parent - HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), + HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), lastEndKey); // Sleep a second else the encoded name on these regions comes out // same for all with same start key and made in same second. Thread.sleep(1001); // Daughter a - HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), + HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); Thread.sleep(1001); // Make daughters of daughter a; splitaa and splitab. - HRegionInfo splitaa = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), + HRegionInfo splitaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("bbb")); - HRegionInfo splitab = new HRegionInfo(htd.getName(), Bytes.toBytes("bbb"), + HRegionInfo splitab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ccc")); // Daughter b - HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), + HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), lastEndKey); Thread.sleep(1001); // Make Daughters of daughterb; splitba and splitbb. - HRegionInfo splitba = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), + HRegionInfo splitba = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("ddd")); - HRegionInfo splitbb = new HRegionInfo(htd.getName(), Bytes.toBytes("ddd"), + HRegionInfo splitbb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ddd"), lastEndKey); // First test that our Comparator works right up in CatalogJanitor. @@ -532,24 +568,24 @@ public class TestCatalogJanitor { // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc. // Parent - HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), + HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), new byte[0], true); // Sleep a second else the encoded name on these regions comes out // same for all with same start key and made in same second. Thread.sleep(1001); // Daughter a - HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), + HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"), true); Thread.sleep(1001); // Make daughters of daughter a; splitaa and splitab. 
- HRegionInfo splitaa = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), + HRegionInfo splitaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("bbb"), false); - HRegionInfo splitab = new HRegionInfo(htd.getName(), Bytes.toBytes("bbb"), + HRegionInfo splitab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), false); // Daughter b - HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), + HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"), new byte[0]); Thread.sleep(1001); @@ -611,11 +647,14 @@ public class TestCatalogJanitor { */ // root region - HRegionInfo rootRegion = new HRegionInfo(htd.getName(), HConstants.EMPTY_START_ROW, + HRegionInfo rootRegion = new HRegionInfo(htd.getTableName(), + HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, true); - HRegionInfo firstRegion = new HRegionInfo(htd.getName(), HConstants.EMPTY_START_ROW, + HRegionInfo firstRegion = new HRegionInfo(htd.getTableName(), + HConstants.EMPTY_START_ROW, Bytes.toBytes("bbb"), true); - HRegionInfo lastRegion = new HRegionInfo(htd.getName(), Bytes.toBytes("bbb"), + HRegionInfo lastRegion = new HRegionInfo(htd.getTableName(), + Bytes.toBytes("bbb"), HConstants.EMPTY_END_ROW, true); assertTrue(comp.compare(rootRegion, rootRegion) == 0); @@ -626,14 +665,18 @@ public class TestCatalogJanitor { assertTrue(comp.compare(firstRegion, lastRegion) < 0); //first region split into a, b - HRegionInfo firstRegiona = new HRegionInfo(htd.getName(), HConstants.EMPTY_START_ROW, + HRegionInfo firstRegiona = new HRegionInfo(htd.getTableName(), + HConstants.EMPTY_START_ROW, Bytes.toBytes("aaa"), true); - HRegionInfo firstRegionb = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), + HRegionInfo firstRegionb = new HRegionInfo(htd.getTableName(), + Bytes.toBytes("aaa"), Bytes.toBytes("bbb"), true); //last region split into a, b - HRegionInfo lastRegiona = new HRegionInfo(htd.getName(), Bytes.toBytes("bbb"), + HRegionInfo lastRegiona = new HRegionInfo(htd.getTableName(), + Bytes.toBytes("bbb"), Bytes.toBytes("ddd"), true); - HRegionInfo lastRegionb = new HRegionInfo(htd.getName(), Bytes.toBytes("ddd"), + HRegionInfo lastRegionb = new HRegionInfo(htd.getTableName(), + Bytes.toBytes("ddd"), HConstants.EMPTY_END_ROW, true); assertTrue(comp.compare(firstRegiona, firstRegiona) == 0); @@ -657,9 +700,11 @@ public class TestCatalogJanitor { assertTrue(comp.compare(firstRegionb, lastRegiona) < 0); assertTrue(comp.compare(firstRegionb, lastRegionb) < 0); - HRegionInfo lastRegionaa = new HRegionInfo(htd.getName(), Bytes.toBytes("bbb"), + HRegionInfo lastRegionaa = new HRegionInfo(htd.getTableName(), + Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), false); - HRegionInfo lastRegionab = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), + HRegionInfo lastRegionab = new HRegionInfo(htd.getTableName(), + Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), false); assertTrue(comp.compare(lastRegiona, lastRegionaa) < 0); @@ -680,11 +725,15 @@ public class TestCatalogJanitor { CatalogJanitor janitor = new CatalogJanitor(server, services); // Create regions. 
- HTableDescriptor htd = new HTableDescriptor(table); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table)); htd.addFamily(new HColumnDescriptor("f")); - HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee")); - HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); - HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee")); + HRegionInfo parent = new HRegionInfo(htd.getTableName(), + Bytes.toBytes("aaa"), Bytes.toBytes("eee")); + HRegionInfo splita = new HRegionInfo(htd.getTableName(), + Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); + HRegionInfo splitb = new HRegionInfo(htd.getTableName(), + Bytes.toBytes("ccc"), + Bytes.toBytes("eee")); // Test that when both daughter regions are in place, that we do not // remove the parent. @@ -695,7 +744,7 @@ public class TestCatalogJanitor { // archive directory. Otherwise, it just seems to pick the first root directory it can find (so // the single test passes, but when the full suite is run, things get borked). FSUtils.setRootDir(fs.getConf(), rootdir); - Path tabledir = HTableDescriptor.getTableDir(rootdir, htd.getName()); + Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName()); Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName()); Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent, tabledir, htd.getColumnFamilies()[0].getName()); @@ -760,11 +809,14 @@ public class TestCatalogJanitor { CatalogJanitor janitor = new CatalogJanitor(server, services); // Create regions. - HTableDescriptor htd = new HTableDescriptor(table); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table)); htd.addFamily(new HColumnDescriptor("f")); - HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee")); - HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); - HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee")); + HRegionInfo parent = new HRegionInfo(htd.getTableName(), + Bytes.toBytes("aaa"), Bytes.toBytes("eee")); + HRegionInfo splita = new HRegionInfo(htd.getTableName(), + Bytes.toBytes("aaa"), Bytes.toBytes("ccc")); + HRegionInfo splitb = new HRegionInfo(htd.getTableName(), + Bytes.toBytes("ccc"), Bytes.toBytes("eee")); // Test that when both daughter regions are in place, that we do not // remove the parent. Result r = createResult(parent, splita, splitb); @@ -776,7 +828,7 @@ public class TestCatalogJanitor { // archive directory. Otherwise, it just seems to pick the first root directory it can find (so // the single test passes, but when the full suite is run, things get borked). 
FSUtils.setRootDir(fs.getConf(), rootdir); - Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName()); + Path tabledir = FSUtils.getTableDir(rootdir, parent.getTableName()); Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName()); System.out.println("Old root:" + rootdir); System.out.println("Old table:" + tabledir); @@ -790,7 +842,6 @@ public class TestCatalogJanitor { addMockStoreFiles(2, services, storedir); // get the current store files for comparison FileStatus[] storeFiles = fs.listStatus(storedir); - // do the cleaning of the parent assertTrue(janitor.cleanParent(parent, r)); @@ -859,7 +910,7 @@ public class TestCatalogJanitor { final HRegionInfo daughter, final byte [] midkey, final boolean top) throws IOException { Path rootdir = services.getMasterFileSystem().getRootDir(); - Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName()); + Path tabledir = FSUtils.getTableDir(rootdir, parent.getTableName()); Path storedir = HStore.getStoreHomedir(tabledir, daughter, htd.getColumnFamilies()[0].getName()); Reference ref = @@ -879,7 +930,7 @@ public class TestCatalogJanitor { } private HTableDescriptor createHTableDescriptor() { - HTableDescriptor htd = new HTableDescriptor("t"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t")); htd.addFamily(new HColumnDescriptor("f")); return htd; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java index 27b29b6..d308e3d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java @@ -53,6 +53,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -61,6 +62,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.Waiter; @@ -83,14 +85,18 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.ZKAssign; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.zookeeper.KeeperException; import org.junit.After; +import org.junit.AfterClass; import org.junit.Assert; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -107,13 +113,28 @@ public class TestDistributedLogSplitting { } // Start a cluster with 2 masters and 6 regionservers - final int NUM_MASTERS = 2; - final int 
NUM_RS = 6; + static final int NUM_MASTERS = 2; + static final int NUM_RS = 6; MiniHBaseCluster cluster; HMaster master; Configuration conf; - HBaseTestingUtility TEST_UTIL; + static HBaseTestingUtility TEST_UTIL; + static MiniDFSCluster dfsCluster; + static MiniZooKeeperCluster zkCluster; + + @BeforeClass + public static void setup() throws Exception { + TEST_UTIL = new HBaseTestingUtility(HBaseConfiguration.create()); + dfsCluster = TEST_UTIL.startMiniDFSCluster(1); + zkCluster = TEST_UTIL.startMiniZKCluster(); + } + + @AfterClass + public static void tearDown() throws IOException { + TEST_UTIL.shutdownMiniZKCluster(); + TEST_UTIL.shutdownMiniDFSCluster(); + } private void startCluster(int num_rs) throws Exception{ conf = HBaseConfiguration.create(); @@ -130,7 +151,9 @@ public class TestDistributedLogSplitting { conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1); conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, (float) 100.0); // no load balancing TEST_UTIL = new HBaseTestingUtility(conf); - TEST_UTIL.startMiniCluster(NUM_MASTERS, num_rs); + TEST_UTIL.setDFSCluster(dfsCluster); + TEST_UTIL.setZkCluster(zkCluster); + TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, num_rs); cluster = TEST_UTIL.getHBaseCluster(); LOG.info("Waiting for active/ready master"); cluster.waitForActiveAndReadyMaster(); @@ -146,7 +169,9 @@ public class TestDistributedLogSplitting { mt.getMaster().abort("closing...", new Exception("Trace info")); } - TEST_UTIL.shutdownMiniCluster(); + TEST_UTIL.shutdownMiniHBaseCluster(); + TEST_UTIL.getTestFileSystem().delete(FSUtils.getRootDir(TEST_UTIL.getConfiguration()), true); + ZKUtil.deleteNodeRecursively(TEST_UTIL.getZooKeeperWatcher(), "/hbase"); } @Test (timeout=300000) @@ -169,7 +194,7 @@ public class TestDistributedLogSplitting { installTable(new ZooKeeperWatcher(conf, "table-creation", null), "table", "family", 40); - byte[] table = Bytes.toBytes("table"); + TableName table = TableName.valueOf("table"); List regions = null; HRegionServer hrs = null; for (int i = 0; i < NUM_RS; i++) { @@ -177,7 +202,7 @@ public class TestDistributedLogSplitting { hrs = rsts.get(i).getRegionServer(); regions = ProtobufUtil.getOnlineRegions(hrs); for (HRegionInfo region : regions) { - if (region.getTableNameAsString().equalsIgnoreCase("table")) { + if (region.getTableName().getNameAsString().equalsIgnoreCase("table")) { foundRs = true; break; } @@ -191,7 +216,8 @@ public class TestDistributedLogSplitting { Iterator it = regions.iterator(); while (it.hasNext()) { HRegionInfo region = it.next(); - if (region.isMetaTable()) { + if (region.getTableName().getNamespaceAsString() + .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { it.remove(); } } @@ -202,7 +228,7 @@ public class TestDistributedLogSplitting { int count = 0; for (HRegionInfo hri : regions) { - Path tdir = HTableDescriptor.getTableDir(rootdir, table); + Path tdir = FSUtils.getTableDir(rootdir, table); @SuppressWarnings("deprecation") Path editsdir = HLogUtil.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName())); @@ -639,12 +665,13 @@ public class TestDistributedLogSplitting { isCarryingMeta = true; break; } - if (tableName != null && !tableName.equalsIgnoreCase(region.getTableNameAsString())) { + if (tableName != null && + !tableName.equalsIgnoreCase(region.getTableName().getNameAsString())) { // make sure that we find a RS has online regions for both "table" and "disableTable" hasRegionsForBothTables = true; break; } else if (tableName == null) { - tableName = region.getTableNameAsString(); + 
tableName = region.getTableName().getNameAsString(); } } if (isCarryingMeta) { @@ -705,7 +732,7 @@ public class TestDistributedLogSplitting { int count = 0; FileSystem fs = master.getMasterFileSystem().getFileSystem(); Path rootdir = FSUtils.getRootDir(conf); - Path tdir = HTableDescriptor.getTableDir(rootdir, Bytes.toBytes("disableTable")); + Path tdir = FSUtils.getTableDir(rootdir, TableName.valueOf("disableTable")); for (HRegionInfo hri : regions) { @SuppressWarnings("deprecation") Path editsdir = @@ -1059,19 +1086,19 @@ public class TestDistributedLogSplitting { LOG.debug("Waiting for no more RIT\n"); blockUntilNoRIT(zkw, master); NavigableSet regions = getAllOnlineRegions(cluster); - LOG.debug("Verifying only catalog regions are assigned\n"); - if (regions.size() != 1) { + LOG.debug("Verifying only catalog and namespace regions are assigned\n"); + if (regions.size() != 2) { for (String oregion : regions) LOG.debug("Region still online: " + oregion); } - assertEquals(1 + existingRegions, regions.size()); + assertEquals(2 + existingRegions, regions.size()); LOG.debug("Enabling table\n"); TEST_UTIL.getHBaseAdmin().enableTable(table); LOG.debug("Waiting for no more RIT\n"); blockUntilNoRIT(zkw, master); LOG.debug("Verifying there are " + numRegions + " assigned on cluster\n"); regions = getAllOnlineRegions(cluster); - assertEquals(numRegions + 1 + existingRegions, regions.size()); + assertEquals(numRegions + 2 + existingRegions, regions.size()); return ht; } @@ -1085,7 +1112,7 @@ public class TestDistributedLogSplitting { HRegionServer hrs = rst.getRegionServer(); List hris = ProtobufUtil.getOnlineRegions(hrs); for (HRegionInfo hri : hris) { - if (hri.isMetaTable()) { + if (HTableDescriptor.isSystemTable(hri.getTableName())) { continue; } LOG.debug("adding data to rs = " + rst.getName() + @@ -1104,16 +1131,22 @@ public class TestDistributedLogSplitting { public void makeHLog(HLog log, List regions, String tname, String fname, int num_edits, int edit_size, boolean closeLog) throws IOException { - + TableName fullTName = TableName.valueOf(tname); // remove root and meta region regions.remove(HRegionInfo.FIRST_META_REGIONINFO); - byte[] table = Bytes.toBytes(tname); - HTableDescriptor htd = new HTableDescriptor(tname); + + for(Iterator iter = regions.iterator(); iter.hasNext(); ) { + HRegionInfo regionInfo = iter.next(); + if(HTableDescriptor.isSystemTable(regionInfo.getTableName())) { + iter.remove(); + } + } + HTableDescriptor htd = new HTableDescriptor(fullTName); byte[] value = new byte[edit_size]; List hris = new ArrayList(); for (HRegionInfo region : regions) { - if (!region.getTableNameAsString().equalsIgnoreCase(tname)) { + if (!region.getTableName().getNameAsString().equalsIgnoreCase(tname)) { continue; } hris.add(region); @@ -1139,7 +1172,7 @@ public class TestDistributedLogSplitting { byte[] family = Bytes.toBytes(fname); byte[] qualifier = Bytes.toBytes("c" + Integer.toString(i)); e.add(new KeyValue(row, family, qualifier, System.currentTimeMillis(), value)); - log.append(curRegionInfo, table, e, System.currentTimeMillis(), htd); + log.append(curRegionInfo, fullTName, e, System.currentTimeMillis(), htd); counts[i % n] += 1; } } @@ -1286,7 +1319,7 @@ public class TestDistributedLogSplitting { if (region.isMetaRegion()) { isCarryingMeta = true; } - if (tableName == null || region.getTableNameAsString().equalsIgnoreCase(tableName)) { + if (tableName == null || region.getTableName().getNameAsString().equals(tableName)) { foundTableRegion = true; } if (foundTableRegion && 
(isCarryingMeta || !hasMetaRegion)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java index 04c5e55..f4efe5e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java @@ -43,7 +43,8 @@ import static org.junit.Assert.*; public class TestMaster { private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static final Log LOG = LogFactory.getLog(TestMaster.class); - private static final byte[] TABLENAME = Bytes.toBytes("TestMaster"); + private static final TableName TABLENAME = + TableName.valueOf("TestMaster"); private static final byte[] FAMILYNAME = Bytes.toBytes("fam"); private static HBaseAdmin admin; @@ -65,14 +66,12 @@ public class TestMaster { HMaster m = cluster.getMaster(); HTable ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME); - assertTrue(m.assignmentManager.getZKTable().isEnabledTable - (Bytes.toString(TABLENAME))); + assertTrue(m.assignmentManager.getZKTable().isEnabledTable(TABLENAME)); TEST_UTIL.loadTable(ht, FAMILYNAME); ht.close(); List> tableRegions = - MetaReader.getTableRegionsAndLocations(m.getCatalogTracker(), - Bytes.toString(TABLENAME)); + MetaReader.getTableRegionsAndLocations(m.getCatalogTracker(), TABLENAME); LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions)); assertEquals(1, tableRegions.size()); assertArrayEquals(HConstants.EMPTY_START_ROW, @@ -82,7 +81,7 @@ public class TestMaster { // Now trigger a split and stop when the split is in progress LOG.info("Splitting table"); - TEST_UTIL.getHBaseAdmin().split(TABLENAME); + TEST_UTIL.getHBaseAdmin().split(TABLENAME.getName()); LOG.info("Waiting for split result to be about to open"); while (!m.assignmentManager.wasSplitHandlerCalled()) { Thread.sleep(100); @@ -122,7 +121,8 @@ public class TestMaster { @Test public void testMoveThrowsUnknownRegionException() throws IOException { - byte[] tableName = Bytes.toBytes("testMoveThrowsUnknownRegionException"); + TableName tableName = + TableName.valueOf("testMoveThrowsUnknownRegionException"); HTableDescriptor htd = new HTableDescriptor(tableName); HColumnDescriptor hcd = new HColumnDescriptor("value"); htd.addFamily(hcd); @@ -144,7 +144,7 @@ public class TestMaster { public void testMoveThrowsPleaseHoldException() throws IOException { byte[] tableName = Bytes.toBytes("testMoveThrowsPleaseHoldException"); HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); HColumnDescriptor hcd = new HColumnDescriptor("value"); htd.addFamily(hcd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java index 8efb9c2..d35ca20 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import 
org.apache.hadoop.hbase.HColumnDescriptor; @@ -188,7 +189,7 @@ public class TestMasterFailover { }; byte [] enabledTable = Bytes.toBytes("enabledTable"); - HTableDescriptor htdEnabled = new HTableDescriptor(enabledTable); + HTableDescriptor htdEnabled = new HTableDescriptor(TableName.valueOf(enabledTable)); htdEnabled.addFamily(new HColumnDescriptor(FAMILY)); FileSystem filesystem = FileSystem.get(conf); @@ -197,26 +198,26 @@ public class TestMasterFailover { // Write the .tableinfo fstd.createTableDescriptor(htdEnabled); - HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null); + HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getTableName(), null, null); createRegion(hriEnabled, rootdir, conf, htdEnabled); List enabledRegions = TEST_UTIL.createMultiRegionsInMeta( TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS); - byte [] disabledTable = Bytes.toBytes("disabledTable"); + TableName disabledTable = TableName.valueOf("disabledTable"); HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable); htdDisabled.addFamily(new HColumnDescriptor(FAMILY)); // Write the .tableinfo fstd.createTableDescriptor(htdDisabled); - HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null); + HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getTableName(), null, null); createRegion(hriDisabled, rootdir, conf, htdDisabled); List disabledRegions = TEST_UTIL.createMultiRegionsInMeta( TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS); - log("Regions in META have been created"); + log("Regions in META and namespace have been created"); - // at this point we only expect 2 regions to be assigned out (catalogs) - assertEquals(1, cluster.countServedRegions()); + // at this point we only expect 3 regions to be assigned out (catalogs and namespace) + assertEquals(2, cluster.countServedRegions()); // Let's just assign everything to first RS HRegionServer hrs = cluster.getRegionServer(0); @@ -267,7 +268,7 @@ public class TestMasterFailover { // Disable the disabledTable in ZK ZKTable zktable = new ZKTable(zkw); - zktable.setDisabledTable(Bytes.toString(disabledTable)); + zktable.setDisabledTable(disabledTable); /* * ZK = OFFLINE @@ -492,35 +493,36 @@ public class TestMasterFailover { TEST_UTIL.getRegionSplitStartKeys(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 30); byte [] enabledTable = Bytes.toBytes("enabledTable"); - HTableDescriptor htdEnabled = new HTableDescriptor(enabledTable); + HTableDescriptor htdEnabled = new HTableDescriptor(TableName.valueOf(enabledTable)); htdEnabled.addFamily(new HColumnDescriptor(FAMILY)); FileSystem filesystem = FileSystem.get(conf); Path rootdir = FSUtils.getRootDir(conf); FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir); // Write the .tableinfo fstd.createTableDescriptor(htdEnabled); - HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), + HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getTableName(), null, null); createRegion(hriEnabled, rootdir, conf, htdEnabled); List enabledRegions = TEST_UTIL.createMultiRegionsInMeta( TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS); - byte [] disabledTable = Bytes.toBytes("disabledTable"); + TableName disabledTable = + TableName.valueOf("disabledTable"); HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable); htdDisabled.addFamily(new HColumnDescriptor(FAMILY)); // Write the .tableinfo fstd.createTableDescriptor(htdDisabled); - HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null); + HRegionInfo hriDisabled = 
new HRegionInfo(htdDisabled.getTableName(), null, null); createRegion(hriDisabled, rootdir, conf, htdDisabled); List disabledRegions = TEST_UTIL.createMultiRegionsInMeta( TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS); - log("Regions in META have been created"); + log("Regions in META and Namespace have been created"); - // at this point we only expect 2 regions to be assigned out (catalogs) - assertEquals(1, cluster.countServedRegions()); + // at this point we only expect 2 regions to be assigned out (catalogs and namespace ) + assertEquals(2, cluster.countServedRegions()); // The first RS will stay online List regionservers = @@ -558,7 +560,7 @@ public class TestMasterFailover { log("Assignment completed"); assertTrue(" Table must be enabled.", master.getAssignmentManager() - .getZKTable().isEnabledTable("enabledTable")); + .getZKTable().isEnabledTable(TableName.valueOf("enabledTable"))); // we also need regions assigned out on the dead server List enabledAndOnDeadRegions = new ArrayList(); enabledAndOnDeadRegions.addAll(enabledRegions.subList(0, 6)); @@ -619,10 +621,10 @@ public class TestMasterFailover { // Disable the disabledTable in ZK ZKTable zktable = new ZKTable(zkw); - zktable.setDisabledTable(Bytes.toString(disabledTable)); + zktable.setDisabledTable(disabledTable); assertTrue(" The enabled table should be identified on master fail over.", - zktable.isEnabledTable("enabledTable")); + zktable.isEnabledTable(TableName.valueOf("enabledTable"))); /* * ZK = CLOSING diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java index d67880b..0c64896 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java @@ -88,7 +88,7 @@ public class TestMasterMetrics { public void testDefaultMasterMetrics() throws Exception { MetricsMasterSource masterSource = master.getMetrics().getMetricsSource(); metricsHelper.assertGauge( "numRegionServers", 1, masterSource); - metricsHelper.assertGauge( "averageLoad", 1, masterSource); + metricsHelper.assertGauge( "averageLoad", 2, masterSource); metricsHelper.assertGauge( "numDeadRegionServers", 0, masterSource); metricsHelper.assertGauge("masterStartTime", master.getMasterStartTime(), masterSource); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index 423c5c6..b6bdf83 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -30,6 +30,7 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -43,8 +44,8 @@ import org.apache.hadoop.hbase.catalog.MetaMockingUtil; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; -import org.apache.hadoop.hbase.util.Bytes; import 
org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker; @@ -52,7 +53,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.MediumTests; import org.apache.zookeeper.KeeperException; -import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; @@ -156,7 +156,7 @@ public class TestMasterNoCluster { // Put some data into the servers. Make it look like sn0 has the metaH // Put data into sn2 so it looks like it has a few regions for a table named 't'. MetaRegionTracker.setMetaLocation(rs0.getZooKeeper(), rs0.getServerName()); - final byte [] tableName = Bytes.toBytes("t"); + final TableName tableName = TableName.valueOf("t"); Result [] results = new Result [] { MetaMockingUtil.getMetaTableRowResult( new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HBaseTestingUtility.KEYS[1]), @@ -209,6 +209,11 @@ public class TestMasterNoCluster { rs0, rs0, rs0.getServerName(), HRegionInfo.ROOT_REGIONINFO); return new CatalogTracker(zk, conf, connection, abortable); } + + @Override + void assignSystemTables(MonitoredTask status) + throws IOException, InterruptedException, KeeperException { + } }; master.start(); @@ -243,9 +248,9 @@ public class TestMasterNoCluster { * @throws DeserializationException * @throws ServiceException */ - @Test (timeout=30000) + @Test (timeout=60000) public void testCatalogDeploys() - throws IOException, KeeperException, InterruptedException, DeserializationException, ServiceException { + throws Exception { final Configuration conf = TESTUTIL.getConfiguration(); conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1); conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, 1); @@ -295,6 +300,11 @@ public class TestMasterNoCluster { rs0, rs0, rs0.getServerName(), HRegionInfo.FIRST_META_REGIONINFO); return new CatalogTracker(zk, conf, connection, abortable); } + + @Override + void assignSystemTables(MonitoredTask status) + throws IOException, InterruptedException, KeeperException { + } }; master.start(); LOG.info("Master has started"); @@ -388,6 +398,11 @@ public class TestMasterNoCluster { rs0, rs0, rs0.getServerName(), HRegionInfo.ROOT_REGIONINFO); return new CatalogTracker(zk, conf, connection, abortable); } + + @Override + void assignSystemTables(MonitoredTask status) + throws IOException, InterruptedException, KeeperException { + } }; master.start(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java index 51db3c4..ef7812f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java @@ -83,7 +83,7 @@ public class TestMasterRestartAfterDisablingTable { NavigableSet regions = getAllOnlineRegions(cluster); assertEquals( "The number of regions for the table tableRestart should be 0 and only" - + "the catalog tables should be present.", 1, regions.size()); + + "the catalog and namespace tables should be present.", 2, regions.size()); List masterThreads = 
cluster.getMasterThreads(); MasterThread activeMaster = null; @@ -99,7 +99,7 @@ public class TestMasterRestartAfterDisablingTable { assertTrue("The table should not be in enabled state", cluster.getMaster() .getAssignmentManager().getZKTable().isDisablingOrDisabledTable( - "tableRestart")); + TableName.valueOf("tableRestart"))); log("Enabling table\n"); // Need a new Admin, the previous one is on the old master HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); @@ -110,10 +110,11 @@ public class TestMasterRestartAfterDisablingTable { log("Verifying there are " + numRegions + " assigned on cluster\n"); regions = getAllOnlineRegions(cluster); assertEquals( - "The assigned regions were not onlined after master switch except for the catalog tables.", - 5, regions.size()); + "The assigned regions were not onlined after master switch except for the catalog and namespace tables.", + 6, regions.size()); assertTrue("The table should be in enabled state", cluster.getMaster() - .getAssignmentManager().getZKTable().isEnabledTable("tableRestart")); + .getAssignmentManager().getZKTable() + .isEnabledTable(TableName.valueOf("tableRestart"))); ht.close(); TEST_UTIL.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java index 5b0c584..09c545f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java @@ -58,9 +58,10 @@ public class TestMasterStatusServlet { static final ServerName FAKE_HOST = new ServerName("fakehost", 12345, 1234567890); static final HTableDescriptor FAKE_TABLE = - new HTableDescriptor("mytable"); + new HTableDescriptor(TableName.valueOf("mytable")); static final HRegionInfo FAKE_HRI = - new HRegionInfo(FAKE_TABLE.getName(), Bytes.toBytes("a"), Bytes.toBytes("b")); + new HRegionInfo(FAKE_TABLE.getTableName(), + Bytes.toBytes("a"), Bytes.toBytes("b")); @Before public void setupBasicMocks() { @@ -106,8 +107,8 @@ public class TestMasterStatusServlet { private void setupMockTables() throws IOException { HTableDescriptor tables[] = new HTableDescriptor[] { - new HTableDescriptor("foo"), - new HTableDescriptor("bar") + new HTableDescriptor(TableName.valueOf("foo")), + new HTableDescriptor(TableName.valueOf("bar")) }; Mockito.doReturn(tables).when(admin).listTables(); } @@ -158,7 +159,7 @@ public class TestMasterStatusServlet { NavigableMap regionsInTransition = Maps.newTreeMap(); for (byte i = 0; i < 100; i++) { - HRegionInfo hri = new HRegionInfo(FAKE_TABLE.getName(), + HRegionInfo hri = new HRegionInfo(FAKE_TABLE.getTableName(), new byte[]{i}, new byte[]{(byte) (i+1)}); regionsInTransition.put(hri.getEncodedName(), new RegionState(hri, RegionState.State.CLOSING, 12345L, FAKE_HOST)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java index f9a27c8..749a439 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java @@ -22,6 +22,7 @@ import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -61,7 +62,7 @@ public class TestMasterTransitions {
     TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
     TEST_UTIL.startMiniCluster(2);
     // Create a table of three families. This will assign a region.
-    byte[] tableName = Bytes.toBytes(TABLENAME);
+    TableName tableName = TableName.valueOf(TABLENAME);
     TEST_UTIL.createTable(tableName, FAMILIES);
     HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
     int countOfRegions = TEST_UTIL.createMultiRegions(t, getTestFamily());
@@ -480,7 +481,7 @@ public class TestMasterTransitions {
   private static int addToEachStartKey(final int expected) throws IOException {
     HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
     HTable meta = new HTable(TEST_UTIL.getConfiguration(),
-        HConstants.META_TABLE_NAME);
+        TableName.META_TABLE_NAME);
     int rows = 0;
     Scan scan = new Scan();
     scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
@@ -488,8 +489,11 @@
     for (Result r = null; (r = s.next()) != null;) {
       HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
       if (hri == null) break;
+      if (!hri.getTableName().getNameAsString().equals(TABLENAME)) {
+        continue;
+      }
       // If start key, add 'aaa'.
       byte [] row = getStartKey(hri);
       Put p = new Put(row);
       p.setDurability(Durability.SKIP_WAL);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java
index bd20d33..4f2f5ad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java
@@ -117,8 +117,8 @@ public class TestOpenedRegionHandler {
       TEST_UTIL.startMiniZKCluster();
       final Server server = new MockServer(TEST_UTIL);
       HTableDescriptor htd = new HTableDescriptor(
-          "testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches");
-      HRegionInfo hri = new HRegionInfo(htd.getName(),
+          TableName.valueOf("testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches"));
+      HRegionInfo hri = new HRegionInfo(htd.getTableName(),
           Bytes.toBytes(testIndex), Bytes.toBytes(testIndex + 1));
       region = HRegion.createHRegion(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
       assertNotNull(region);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
index 4b2d598..a610f7c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
@@ -35,6 +35,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -42,6 +43,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
+import 
org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; @@ -100,7 +102,7 @@ public class TestRegionPlacement { servers.add(server); } List regions = new ArrayList(1); - HRegionInfo region = new HRegionInfo(("foobar").getBytes()); + HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar")); regions.add(region); Map> assignmentMap = balancer.roundRobinAssignment(regions, servers); @@ -160,7 +162,7 @@ public class TestRegionPlacement { servers.add(server); } List regions = new ArrayList(1); - HRegionInfo region = new HRegionInfo(("foobar").getBytes()); + HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar")); regions.add(region); ServerName serverBefore = balancer.randomAssignment(region, servers); List favoredNodesBefore = @@ -256,7 +258,8 @@ public class TestRegionPlacement { MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); for (int i = 0; i < SLAVES; i++) { HRegionServer rs = cluster.getRegionServer(i); - for (HRegion region: rs.getOnlineRegions(Bytes.toBytes("testRegionAssignment"))) { + for (HRegion region: rs.getOnlineRegions( + TableName.valueOf("testRegionAssignment"))) { InetSocketAddress[] favoredSocketAddress = rs.getFavoredNodesForRegion( region.getRegionInfo().getEncodedName()); ServerName[] favoredServerList = favoredNodesAssignmentPlan.get(region.getRegionInfo()); @@ -268,7 +271,7 @@ public class TestRegionPlacement { // Verify they are ROOT and META regions since no favored nodes assertNull(favoredSocketAddress); assertTrue("User region " + - region.getTableDesc().getNameAsString() + + region.getTableDesc().getTableName() + " should have favored nodes", (desc.isRootRegion() || desc.isMetaRegion())); } else { @@ -310,6 +313,10 @@ public class TestRegionPlacement { public boolean processRow(Result result) throws IOException { try { HRegionInfo info = MetaScanner.getHRegionInfo(result); + if(info.getTableName().getNamespaceAsString() + .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { + return true; + } byte[] server = result.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER); byte[] favoredNodes = result.getValue(HConstants.CATALOG_FAMILY, @@ -381,7 +388,7 @@ public class TestRegionPlacement { splitKeys[i - 1] = new byte[] { splitKey, splitKey, splitKey }; } - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc, splitKeys); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java index b426520..735f3e9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java @@ -102,7 +102,7 @@ public class TestRestartCluster { List allRegions = MetaScanner.listAllRegions(UTIL.getConfiguration(), true); - assertEquals(3, allRegions.size()); + assertEquals(4, allRegions.size()); LOG.info("\n\nShutting down cluster"); UTIL.shutdownMiniHBaseCluster(); @@ -116,10 +116,8 @@ public class TestRestartCluster { // Need to use a new 'Configuration' so we make a new HConnection. // Otherwise we're reusing an HConnection that has gone stale because // the shutdown of the cluster also called shut of the connection. 
- allRegions = MetaScanner. - listAllRegions(new Configuration(UTIL.getConfiguration()), true); - assertEquals(3, allRegions.size()); - + allRegions = MetaScanner.listAllRegions(new Configuration(UTIL.getConfiguration()), true); + assertEquals(4, allRegions.size()); LOG.info("\n\nWaiting for tables to be available"); for(byte [] TABLE: TABLES) { try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java index f2e4b46..d1bcd1f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java @@ -87,11 +87,11 @@ public class TestRollingRestart { log("Waiting for no more RIT\n"); blockUntilNoRIT(zkw, master); NavigableSet regions = getAllOnlineRegions(cluster); - log("Verifying only catalog regions are assigned\n"); - if (regions.size() != 1) { + log("Verifying only catalog and namespace regions are assigned\n"); + if (regions.size() != 2) { for (String oregion : regions) log("Region still online: " + oregion); } - assertEquals(1, regions.size()); + assertEquals(2, regions.size()); log("Enabling table\n"); TEST_UTIL.getHBaseAdmin().enableTable(table); log("Waiting for no more RIT\n"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java index 42880e2..d2a87b9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java @@ -38,6 +38,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Chore; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -72,7 +73,8 @@ public class TestTableLockManager { private static final Log LOG = LogFactory.getLog(TestTableLockManager.class); - private static final byte[] TABLE_NAME = Bytes.toBytes("TestTableLevelLocks"); + private static final TableName TABLE_NAME = + TableName.valueOf("TestTableLevelLocks"); private static final byte[] FAMILY = Bytes.toBytes("f1"); @@ -133,18 +135,18 @@ public class TestTableLockManager { public static class TestLockTimeoutExceptionMasterObserver extends BaseMasterObserver { @Override public void preDeleteColumnHandler(ObserverContext ctx, - byte[] tableName, byte[] c) throws IOException { + TableName tableName, byte[] c) throws IOException { deleteColumn.countDown(); } @Override public void postDeleteColumnHandler(ObserverContext ctx, - byte[] tableName, byte[] c) throws IOException { + TableName tableName, byte[] c) throws IOException { Threads.sleep(10000); } @Override public void preAddColumnHandler(ObserverContext ctx, - byte[] tableName, HColumnDescriptor column) throws IOException { + TableName tableName, HColumnDescriptor column) throws IOException { fail("Add column should have timeouted out for acquiring the table lock"); } } @@ -198,14 +200,14 @@ public class TestTableLockManager { public static class TestAlterAndDisableMasterObserver extends BaseMasterObserver { @Override public void preAddColumnHandler(ObserverContext ctx, - byte[] tableName, HColumnDescriptor column) throws 
IOException { + TableName tableName, HColumnDescriptor column) throws IOException { LOG.debug("addColumn called"); addColumn.countDown(); } @Override public void postAddColumnHandler(ObserverContext ctx, - byte[] tableName, HColumnDescriptor column) throws IOException { + TableName tableName, HColumnDescriptor column) throws IOException { Threads.sleep(6000); try { ctx.getEnvironment().getMasterServices().checkTableModifiable(tableName); @@ -219,7 +221,7 @@ public class TestTableLockManager { @Override public void preDisableTable(ObserverContext ctx, - byte[] tableName) throws IOException { + TableName tableName) throws IOException { try { LOG.debug("Waiting for addColumn to be processed first"); //wait for addColumn to be processed first @@ -232,7 +234,7 @@ public class TestTableLockManager { @Override public void postDisableTableHandler(ObserverContext ctx, - byte[] tableName) throws IOException { + TableName tableName) throws IOException { Threads.sleep(3000); } } @@ -247,7 +249,7 @@ public class TestTableLockManager { //ensure that znode for the table node has been deleted final ZooKeeperWatcher zkWatcher = TEST_UTIL.getZooKeeperWatcher(); - final String znode = ZKUtil.joinZNode(zkWatcher.tableLockZNode, Bytes.toString(TABLE_NAME)); + final String znode = ZKUtil.joinZNode(zkWatcher.tableLockZNode, TABLE_NAME.getNameAsString()); TEST_UTIL.waitFor(5000, new Waiter.Predicate() { @Override @@ -257,7 +259,7 @@ public class TestTableLockManager { } }); int ver = ZKUtil.checkExists(zkWatcher, - ZKUtil.joinZNode(zkWatcher.tableLockZNode, Bytes.toString(TABLE_NAME))); + ZKUtil.joinZNode(zkWatcher.tableLockZNode, TABLE_NAME.getNameAsString())); assertTrue("Unexpected znode version " + ver, ver < 0); } @@ -285,7 +287,8 @@ public class TestTableLockManager { @Override public Void call() throws Exception { writeLocksAttempted.countDown(); - lockManager.writeLock(Bytes.toBytes(table), "testReapAllTableLocks").acquire(); + lockManager.writeLock(TableName.valueOf(table), + "testReapAllTableLocks").acquire(); writeLocksObtained.countDown(); return null; } @@ -304,7 +307,9 @@ public class TestTableLockManager { TEST_UTIL.getConfiguration(), TEST_UTIL.getZooKeeperWatcher(), serverName); //should not throw table lock timeout exception - zeroTimeoutLockManager.writeLock(Bytes.toBytes(tables[tables.length -1]), "zero timeout") + zeroTimeoutLockManager.writeLock( + TableName.valueOf(tables[tables.length - 1]), + "zero timeout") .acquire(); executor.shutdownNow(); @@ -321,7 +326,7 @@ public class TestTableLockManager { LoadTestTool loadTool = new LoadTestTool(); loadTool.setConf(TEST_UTIL.getConfiguration()); int numKeys = 10000; - final byte[] tableName = Bytes.toBytes("testTableReadLock"); + final TableName tableName = TableName.valueOf("testTableReadLock"); final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); final HTableDescriptor desc = new HTableDescriptor(tableName); final byte[] family = Bytes.toBytes("test_cf"); @@ -329,7 +334,7 @@ public class TestTableLockManager { admin.createTable(desc); // create with one region // write some data, not much - int ret = loadTool.run(new String[] { "-tn", Bytes.toString(tableName), "-write", + int ret = loadTool.run(new String[] { "-tn", tableName.getNameAsString(), "-write", String.format("%d:%d:%d", 1, 10, 10), "-num_keys", String.valueOf(numKeys), "-skip_init" }); if (0 != ret) { String errorMsg = "Load failed with error code " + ret; @@ -406,7 +411,7 @@ public class TestTableLockManager { assertTrue(newFamilyValues > familyValues); // at least one alter went // 
through - ret = loadTool.run(new String[] { "-tn", Bytes.toString(tableName), "-read", "100:10", + ret = loadTool.run(new String[] { "-tn", tableName.getNameAsString(), "-read", "100:10", "-num_keys", String.valueOf(numKeys), "-skip_init" }); if (0 != ret) { String errorMsg = "Verify failed with error code " + ret; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java index d753216..5ec804b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java @@ -30,6 +30,7 @@ import java.util.Collection; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -62,7 +63,8 @@ import org.mockito.internal.util.reflection.Whitebox; public class TestZKBasedOpenCloseRegion { private static final Log LOG = LogFactory.getLog(TestZKBasedOpenCloseRegion.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final String TABLENAME = "TestZKBasedOpenCloseRegion"; + private static final TableName TABLENAME = + TableName.valueOf("TestZKBasedOpenCloseRegion"); private static final byte [][] FAMILIES = new byte [][] {Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")}; private static int countOfRegions; @@ -72,7 +74,7 @@ public class TestZKBasedOpenCloseRegion { c.setBoolean("dfs.support.append", true); c.setInt("hbase.regionserver.info.port", 0); TEST_UTIL.startMiniCluster(2); - TEST_UTIL.createTable(Bytes.toBytes(TABLENAME), FAMILIES); + TEST_UTIL.createTable(TABLENAME, FAMILIES); HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME); countOfRegions = TEST_UTIL.createMultiRegions(t, getTestFamily()); waitUntilAllRegionsAssigned(); @@ -219,13 +221,13 @@ public class TestZKBasedOpenCloseRegion { */ @Test public void testRegionOpenFailsDueToIOException() throws Exception { - HRegionInfo REGIONINFO = new HRegionInfo(Bytes.toBytes("t"), + HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("t"), HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW); HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0); TableDescriptors htd = Mockito.mock(TableDescriptors.class); Object orizinalState = Whitebox.getInternalState(regionServer,"tableDescriptors"); Whitebox.setInternalState(regionServer, "tableDescriptors", htd); - Mockito.doThrow(new IOException()).when(htd).get((byte[]) Mockito.any()); + Mockito.doThrow(new IOException()).when(htd).get((TableName) Mockito.any()); try { ProtobufUtil.openRegion(regionServer, REGIONINFO); fail("It should throw IOException "); @@ -238,7 +240,7 @@ public class TestZKBasedOpenCloseRegion { private static void waitUntilAllRegionsAssigned() throws IOException { - HTable meta = new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME); + HTable meta = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME); while (true) { int rows = 0; Scan scan = new Scan(); @@ -273,7 +275,7 @@ public class TestZKBasedOpenCloseRegion { private static int addToEachStartKey(final int expected) throws IOException { HTable t = new HTable(TEST_UTIL.getConfiguration(), 
TABLENAME); HTable meta = new HTable(TEST_UTIL.getConfiguration(), - HConstants.META_TABLE_NAME); + TableName.META_TABLE_NAME); int rows = 0; Scan scan = new Scan(); scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); @@ -281,7 +283,9 @@ public class TestZKBasedOpenCloseRegion { for (Result r = null; (r = s.next()) != null;) { HRegionInfo hri = HRegionInfo.getHRegionInfo(r); if (hri == null) break; - + if(!hri.getTableName().equals(TABLENAME)) { + continue; + } // If start key, add 'aaa'. byte [] row = getStartKey(hri); Put p = new Put(row); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java index 7a7b512..9440b71 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java @@ -30,6 +30,7 @@ import java.util.SortedSet; import java.util.TreeMap; import java.util.TreeSet; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.master.RegionPlan; @@ -197,7 +198,8 @@ public class BalancerTestBase { } Bytes.putInt(start, 0, numRegions << 1); Bytes.putInt(end, 0, (numRegions << 1) + 1); - byte[] tableName = Bytes.toBytes("table" + (numTables > 0 ? rand.nextInt(numTables) : i)); + TableName tableName = + TableName.valueOf("table" + (numTables > 0 ? rand.nextInt(numTables) : i)); HRegionInfo hri = new HRegionInfo(tableName, start, end, false, regionId++); regions.add(hri); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredNodeAssignmentHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredNodeAssignmentHelper.java index 84df3b7..bcf1318 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredNodeAssignmentHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredNodeAssignmentHelper.java @@ -28,6 +28,7 @@ import java.util.SortedMap; import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.ServerName; @@ -271,7 +272,7 @@ public class TestFavoredNodeAssignmentHelper { // create regions List regions = new ArrayList(regionCount); for (int i = 0; i < regionCount; i++) { - HRegionInfo region = new HRegionInfo(("foobar"+i).getBytes()); + HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar" + i)); regions.add(region); } // place the regions @@ -299,7 +300,7 @@ public class TestFavoredNodeAssignmentHelper { // create some regions List regions = new ArrayList(regionCount); for (int i = 0; i < regionCount; i++) { - HRegionInfo region = new HRegionInfo(("foobar" + i).getBytes()); + HRegionInfo region = new HRegionInfo(TableName.valueOf("foobar" + i)); regions.add(region); } // place those regions in primary RSs diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java index 01a090b..e74093d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java @@ -27,16 +27,14 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SmallTests; -import org.apache.hadoop.hbase.backup.HFileArchiver; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.io.HFileLink; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -60,13 +58,13 @@ public class TestHFileLinkCleaner { Path rootDir = FSUtils.getRootDir(conf); FileSystem fs = FileSystem.get(conf); - final String tableName = "test-table"; - final String tableLinkName = "test-link"; + final TableName tableName = TableName.valueOf("test-table"); + final TableName tableLinkName = TableName.valueOf("test-link"); final String hfileName = "1234567890"; final String familyName = "cf"; - HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tableName)); - HRegionInfo hriLink = new HRegionInfo(Bytes.toBytes(tableLinkName)); + HRegionInfo hri = new HRegionInfo(tableName); + HRegionInfo hriLink = new HRegionInfo(tableLinkName); Path archiveDir = HFileArchiveUtil.getArchivePath(conf); Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf, @@ -103,7 +101,8 @@ public class TestHFileLinkCleaner { assertTrue(fs.exists(hfilePath)); // Link backref can be removed - fs.rename(new Path(rootDir, tableLinkName), new Path(archiveDir, tableLinkName)); + fs.rename(FSUtils.getTableDir(rootDir, tableLinkName), + FSUtils.getTableDir(archiveDir, tableLinkName)); cleaner.chore(); assertFalse("Link should be deleted", fs.exists(linkBackRef)); @@ -117,15 +116,15 @@ public class TestHFileLinkCleaner { Thread.sleep(ttl * 2); cleaner.chore(); } - assertFalse("HFile should be deleted", fs.exists(new Path(archiveDir, tableName))); - assertFalse("Link should be deleted", fs.exists(new Path(archiveDir, tableLinkName))); + assertFalse("HFile should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableName))); + assertFalse("Link should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableLinkName))); cleaner.interrupt(); } - private static Path getFamilyDirPath (final Path rootDir, final String table, + private static Path getFamilyDirPath (final Path rootDir, final TableName table, final String region, final String family) { - return new Path(new Path(new Path(rootDir, table), region), family); + return new Path(new Path(FSUtils.getTableDir(rootDir, table), region), family); } static class DummyServer implements Server { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java index 4e42d89..4e97874 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java @@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; 
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MediumTests; @@ -42,6 +43,7 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler; import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest; @@ -83,9 +85,9 @@ public class TestSnapshotFromMaster { // for hfile archiving test. private static Path archiveDir; - private static final String STRING_TABLE_NAME = "test"; private static final byte[] TEST_FAM = Bytes.toBytes("fam"); - private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME); + private static final TableName TABLE_NAME = + TableName.valueOf("test"); // refresh the cache every 1/2 second private static final long cacheRefreshPeriod = 500; @@ -128,7 +130,7 @@ public class TestSnapshotFromMaster { @Before public void setup() throws Exception { UTIL.createTable(TABLE_NAME, TEST_FAM); - master.getSnapshotManagerForTesting().setSnapshotHandlerForTesting(STRING_TABLE_NAME, null); + master.getSnapshotManagerForTesting().setSnapshotHandlerForTesting(TABLE_NAME, null); } @After @@ -160,6 +162,7 @@ public class TestSnapshotFromMaster { public void testIsDoneContract() throws Exception { IsSnapshotDoneRequest.Builder builder = IsSnapshotDoneRequest.newBuilder(); + String snapshotName = "asyncExpectedFailureTest"; // check that we get an exception when looking up snapshot where one hasn't happened @@ -168,7 +171,7 @@ public class TestSnapshotFromMaster { // and that we get the same issue, even if we specify a name SnapshotDescription desc = SnapshotDescription.newBuilder() - .setName(snapshotName).setTable(STRING_TABLE_NAME).build(); + .setName(snapshotName).setTable(TABLE_NAME.getNameAsString()).build(); builder.setSnapshot(desc); SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(), UnknownSnapshotException.class); @@ -182,7 +185,7 @@ public class TestSnapshotFromMaster { .thenReturn(EnvironmentEdgeManager.currentTimeMillis()); master.getSnapshotManagerForTesting() - .setSnapshotHandlerForTesting(STRING_TABLE_NAME, mockHandler); + .setSnapshotHandlerForTesting(TABLE_NAME, mockHandler); // if we do a lookup without a snapshot name, we should fail - you should always know your name builder = IsSnapshotDoneRequest.newBuilder(); @@ -322,7 +325,7 @@ public class TestSnapshotFromMaster { LOG.debug(file.getPath()); } // get the archived files for the table - Collection files = getArchivedHFiles(archiveDir, rootDir, fs, STRING_TABLE_NAME); + Collection files = getArchivedHFiles(archiveDir, rootDir, fs, TABLE_NAME); // and make sure that there is a proper subset for (FileStatus file : snapshotHFiles) { @@ -349,7 +352,7 @@ public class TestSnapshotFromMaster { LOG.info("After delete snapshot cleaners run File-System state"); FSUtils.logFileSystemState(fs, rootDir, LOG); - files = getArchivedHFiles(archiveDir, rootDir, fs, STRING_TABLE_NAME); + files = getArchivedHFiles(archiveDir, rootDir, fs, TABLE_NAME); 
assertEquals("Still have some hfiles in the archive, when their snapshot has been deleted.", 0, files.size()); } @@ -359,8 +362,8 @@ public class TestSnapshotFromMaster { * @throws IOException on expected failure */ private final Collection getArchivedHFiles(Path archiveDir, Path rootDir, - FileSystem fs, String tableName) throws IOException { - Path tableArchive = new Path(archiveDir, tableName); + FileSystem fs, TableName tableName) throws IOException { + Path tableArchive = FSUtils.getTableDir(archiveDir, tableName); FileStatus[] archivedHFiles = SnapshotTestingUtils.listHFiles(fs, tableArchive); List files = new ArrayList(archivedHFiles.length); LOG.debug("Have archived hfiles: " + tableArchive); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestCreateTableHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestCreateTableHandler.java index acad9c8..fc0800c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestCreateTableHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestCreateTableHandler.java @@ -27,6 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -67,9 +68,9 @@ public class TestCreateTableHandler { public void testCreateTableHandlerIfCalledTwoTimesAndFirstOneIsUnderProgress() throws Exception { final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); final HMaster m = cluster.getMaster(); - final HTableDescriptor desc = new HTableDescriptor(TABLENAME); + final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLENAME)); desc.addFamily(new HColumnDescriptor(FAMILYNAME)); - final HRegionInfo[] hRegionInfos = new HRegionInfo[] { new HRegionInfo(desc.getName(), null, + final HRegionInfo[] hRegionInfos = new HRegionInfo[] { new HRegionInfo(desc.getTableName(), null, null) }; CustomCreateTableHandler handler = new CustomCreateTableHandler(m, m.getMasterFileSystem(), desc, cluster.getConfiguration(), hRegionInfos, m); @@ -89,15 +90,14 @@ public class TestCreateTableHandler { assertTrue(TEST_UTIL.getHBaseAdmin().isTableEnabled(TABLENAME)); } - @Test (timeout=60000) public void testMasterRestartAfterEnablingNodeIsCreated() throws Exception { byte[] tableName = Bytes.toBytes("testMasterRestartAfterEnablingNodeIsCreated"); final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); final HMaster m = cluster.getMaster(); - final HTableDescriptor desc = new HTableDescriptor(tableName); + final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(FAMILYNAME)); - final HRegionInfo[] hRegionInfos = new HRegionInfo[] { new HRegionInfo(desc.getName(), null, + final HRegionInfo[] hRegionInfos = new HRegionInfo[] { new HRegionInfo(desc.getTableName(), null, null) }; CustomCreateTableHandler handler = new CustomCreateTableHandler(m, m.getMasterFileSystem(), desc, cluster.getConfiguration(), hRegionInfos, m); @@ -126,12 +126,12 @@ public class TestCreateTableHandler { } @Override - protected List handleCreateHdfsRegions(Path tableRootDir, String tableName) - throws IOException { + protected List handleCreateHdfsRegions(Path tableRootDir, + TableName tableName) throws IOException { if 
(throwException) { throw new IOException("Test throws exceptions."); } return super.handleCreateHdfsRegions(tableRootDir, tableName); } } -} \ No newline at end of file +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java index 9e638ac..9d828b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java @@ -28,6 +28,7 @@ import java.io.IOException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; @@ -35,6 +36,7 @@ import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -45,7 +47,8 @@ import org.junit.experimental.categories.Category; public class TestTableDeleteFamilyHandler { private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final String TABLENAME = "column_family_handlers"; + private static final TableName TABLENAME = + TableName.valueOf("column_family_handlers"); private static final byte[][] FAMILIES = new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; @@ -61,12 +64,17 @@ public class TestTableDeleteFamilyHandler { TEST_UTIL.startMiniCluster(2); // Create a table of three families. This will assign a region. 
- TEST_UTIL.createTable(Bytes.toBytes(TABLENAME), FAMILIES); + TEST_UTIL.createTable(TABLENAME, FAMILIES); HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME); - + while(TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() + .getRegionStates().getRegionsInTransition().size() > 0) { + Thread.sleep(100); + } // Create multiple regions in all the three column families - TEST_UTIL.createMultiRegions(t, FAMILIES[0]); - + while(TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() + .getRegionStates().getRegionsInTransition().size() > 0) { + Thread.sleep(100); + } // Load the table with data for all families TEST_UTIL.loadTable(t, FAMILIES); @@ -77,7 +85,7 @@ public class TestTableDeleteFamilyHandler { @AfterClass public static void afterAllTests() throws Exception { - TEST_UTIL.deleteTable(Bytes.toBytes(TABLENAME)); + TEST_UTIL.deleteTable(TABLENAME); TEST_UTIL.shutdownMiniCluster(); } @@ -90,8 +98,7 @@ public class TestTableDeleteFamilyHandler { public void deleteColumnFamilyWithMultipleRegions() throws Exception { HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); - HTableDescriptor beforehtd = admin.getTableDescriptor(Bytes - .toBytes(TABLENAME)); + HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME); FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem(); @@ -107,8 +114,7 @@ public class TestTableDeleteFamilyHandler { } // 3 - Check if table exists in FS - Path tableDir = new Path(TEST_UTIL.getDefaultRootDirPath().toString() + "/" - + TABLENAME); + Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLENAME); assertTrue(fs.exists(tableDir)); // 4 - Check if all the 3 column families exist in FS @@ -120,7 +126,7 @@ public class TestTableDeleteFamilyHandler { for (int j = 0; j < cf.length; j++) { if (cf[j].isDir() == true && cf[j].getPath().getName().startsWith(".") == false) { - assertTrue(cf[j].getPath().getName().equals("cf" + k)); + assertEquals(cf[j].getPath().getName(), "cf" + k); k++; } } @@ -129,11 +135,10 @@ public class TestTableDeleteFamilyHandler { // TEST - Disable and delete the column family admin.disableTable(TABLENAME); - admin.deleteColumn(TABLENAME, "cf2"); + admin.deleteColumn(TABLENAME.getName(), "cf2"); // 5 - Check if only 2 column families exist in the descriptor - HTableDescriptor afterhtd = admin.getTableDescriptor(Bytes - .toBytes(TABLENAME)); + HTableDescriptor afterhtd = admin.getTableDescriptor(TABLENAME); assertEquals(2, afterhtd.getColumnFamilies().length); HColumnDescriptor[] newFamilies = afterhtd.getColumnFamilies(); assertTrue(newFamilies[0].getNameAsString().equals("cf1")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java index e431a2f..837ce26 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java @@ -26,9 +26,8 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.Set; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; @@ -37,6 +36,7 @@ import 
org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.FSUtils; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -50,7 +50,8 @@ import org.junit.experimental.categories.Category; public class TestTableDescriptorModification { private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final byte[] TABLE_NAME = Bytes.toBytes("table"); + private static final TableName TABLE_NAME = + TableName.valueOf("table"); private static final byte[] FAMILY_0 = Bytes.toBytes("cf0"); private static final byte[] FAMILY_1 = Bytes.toBytes("cf1"); @@ -133,8 +134,8 @@ public class TestTableDescriptorModification { } } - private void verifyTableDescriptor(final byte[] tableName, final byte[]... families) - throws IOException { + private void verifyTableDescriptor(final TableName tableName, + final byte[]... families) throws IOException { HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); // Verify descriptor from master @@ -143,15 +144,15 @@ public class TestTableDescriptorModification { // Verify descriptor from HDFS MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); - Path tableDir = HTableDescriptor.getTableDir(mfs.getRootDir(), tableName); + Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName); htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); verifyTableDescriptor(htd, tableName, families); } private void verifyTableDescriptor(final HTableDescriptor htd, - final byte[] tableName, final byte[]... families) { + final TableName tableName, final byte[]... 
families) { Set htdFamilies = htd.getFamiliesKeys(); - assertTrue(Bytes.equals(tableName, htd.getName())); + assertEquals(tableName, htd.getTableName()); assertEquals(families.length, htdFamilies.size()); for (byte[] familyName: families) { assertTrue("Expected family " + Bytes.toString(familyName), htdFamilies.contains(familyName)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java index 0b6f1cd..04371d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java @@ -24,6 +24,7 @@ import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -66,8 +67,7 @@ public class TestSnapshotHFileCleaner { // write an hfile to the snapshot directory String snapshotName = "snapshot"; byte[] snapshot = Bytes.toBytes(snapshotName); - String table = "table"; - byte[] tableName = Bytes.toBytes(table); + TableName tableName = TableName.valueOf("table"); Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); HRegionInfo mockRegion = new HRegionInfo(tableName); Path regionSnapshotDir = new Path(snapshotDir, mockRegion.getEncodedName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java index 42b6f92..68dcc0f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java @@ -26,6 +26,7 @@ import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.SmallTests; @@ -79,7 +80,7 @@ public class TestSnapshotManager { @Test public void testInProcess() throws KeeperException, IOException { - String tableName = "testTable"; + TableName tableName = TableName.valueOf("testTable"); SnapshotManager manager = getNewManager(); TakeSnapshotHandler handler = Mockito.mock(TakeSnapshotHandler.class); assertFalse("Manager is in process when there is no current handler", diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java new file mode 100644 index 0000000..e3a3f46 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java @@ -0,0 +1,230 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.migration; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; + +import junit.framework.Assert; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.FsShell; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.util.ToolRunner; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Test upgrade from no namespace in 0.94 to namespace directory structure. + * Mainly tests that tables are migrated and consistent. Also verifies + * that snapshots have been migrated correctly. + * + * Uses a tarball which is an image of an 0.94 hbase.rootdir. + * + * Contains tables with currentKeys as the stored keys: + * foo, ns1.foo, ns2.foo + * + * Contains snapshots with snapshot{num}Keys as the contents: + * snapshot1Keys, snapshot2Keys + * + */ +@Category(MediumTests.class) +public class TestNamespaceUpgrade { + static final Log LOG = LogFactory.getLog(TestNamespaceUpgrade.class); + private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private final static String snapshot1Keys[] = + {"1","10","2","3","4","5","6","7","8","9"}; + private final static String snapshot2Keys[] = + {"1","2","3","4","5","6","7","8","9"}; + private final static String currentKeys[] = + {"1","2","3","4","5","6","7","8","9","A"}; + private final static String tables[] = {"foo", "ns1.foo","ns.two.foo"}; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Start up our mini cluster on top of an 0.94 root.dir that has data from + // a 0.94 hbase run and see if we can migrate to 0.96 + TEST_UTIL.startMiniZKCluster(); + TEST_UTIL.startMiniDFSCluster(1); + Path testdir = TEST_UTIL.getDataTestDir("TestNamespaceUpgrade"); + // Untar our test dir. + File untar = untar(new File(testdir.toString())); + // Now copy the untar up into hdfs so when we start hbase, we'll run from it. 
+ Configuration conf = TEST_UTIL.getConfiguration(); + FsShell shell = new FsShell(conf); + FileSystem fs = FileSystem.get(conf); + // find where hbase will root itself, so we can copy filesystem there + Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath(); + if (!fs.isDirectory(hbaseRootDir.getParent())) { + // mkdir at first + fs.mkdirs(hbaseRootDir.getParent()); + } + doFsCommand(shell, + new String [] {"-put", untar.toURI().toString(), hbaseRootDir.toString()}); + // See whats in minihdfs. + doFsCommand(shell, new String [] {"-lsr", "/"}); + Configuration toolConf = TEST_UTIL.getConfiguration(); + conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDefaultRootDirPath().toString()); + ToolRunner.run(toolConf, new NamespaceUpgrade(), new String[]{"--upgrade"}); + + assertTrue(FSUtils.getVersion(fs, hbaseRootDir).equals(HConstants.FILE_SYSTEM_VERSION)); + TEST_UTIL.startMiniHBaseCluster(1, 1); + + for(String table: tables) { + int count = 0; + for(Result res: new HTable(TEST_UTIL.getConfiguration(), table).getScanner(new Scan())) { + assertEquals(currentKeys[count++], Bytes.toString(res.getRow())); + } + Assert.assertEquals(currentKeys.length, count); + } + assertEquals(2, TEST_UTIL.getHBaseAdmin().listNamespaceDescriptors().length); + } + + private static File untar(final File testdir) throws IOException { + // Find the src data under src/test/data + final String datafile = "TestNamespaceUpgrade"; + File srcTarFile = new File( + System.getProperty("project.build.testSourceDirectory", "src/test") + + File.separator + "data" + File.separator + datafile + ".tgz"); + File homedir = new File(testdir.toString()); + File tgtUntarDir = new File(homedir, "hbase"); + if (tgtUntarDir.exists()) { + if (!FileUtil.fullyDelete(tgtUntarDir)) { + throw new IOException("Failed delete of " + tgtUntarDir.toString()); + } + } + if (!srcTarFile.exists()) { + throw new IOException(srcTarFile+" does not exist"); + } + LOG.info("Untarring " + srcTarFile + " into " + homedir.toString()); + FileUtil.unTar(srcTarFile, homedir); + Assert.assertTrue(tgtUntarDir.exists()); + return tgtUntarDir; + } + + private static void doFsCommand(final FsShell shell, final String [] args) + throws Exception { + // Run the 'put' command. 
+ int errcode = shell.run(args); + if (errcode != 0) throw new IOException("Failed put; errcode=" + errcode); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testSnapshots() throws IOException, InterruptedException { + String snapshots[][] = {snapshot1Keys, snapshot2Keys}; + for(int i=1; i<=snapshots.length; i++) { + for(String table: tables) { + TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot"+i, table+"_clone"+i); + FSUtils.logFileSystemState(FileSystem.get(TEST_UTIL.getConfiguration()), + FSUtils.getRootDir(TEST_UTIL.getConfiguration()), + LOG); + int count = 0; + for(Result res: new HTable(TEST_UTIL.getConfiguration(), table+"_clone"+i).getScanner(new + Scan())) { + assertEquals(snapshots[i-1][count++], Bytes.toString(res.getRow())); + } + Assert.assertEquals(table+"_snapshot"+i, snapshots[i-1].length, count); + } + } + } + + @Test + public void testRenameUsingSnapshots() throws IOException, InterruptedException { + String newNS = "newNS"; + TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(newNS).build()); + for(String table: tables) { + int count = 0; + for(Result res: new HTable(TEST_UTIL.getConfiguration(), table).getScanner(new + Scan())) { + assertEquals(currentKeys[count++], Bytes.toString(res.getRow())); + } + TEST_UTIL.getHBaseAdmin().snapshot(table+"_snapshot3", table); + final String newTableName = + newNS+ TableName.NAMESPACE_DELIM+table+"_clone3"; + TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot3", newTableName); + Thread.sleep(1000); + count = 0; + for(Result res: new HTable(TEST_UTIL.getConfiguration(), newTableName).getScanner(new + Scan())) { + assertEquals(currentKeys[count++], Bytes.toString(res.getRow())); + } + FSUtils.logFileSystemState(TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath() + , LOG); + Assert.assertEquals(newTableName, currentKeys.length, count); + TEST_UTIL.getHBaseAdmin().flush(newTableName); + TEST_UTIL.getHBaseAdmin().majorCompact(newTableName); + TEST_UTIL.waitFor(2000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws IOException { + try { + return TEST_UTIL.getHBaseAdmin().getCompactionState(newTableName) == + AdminProtos.GetRegionInfoResponse.CompactionState.NONE; + } catch (InterruptedException e) { + throw new IOException(e); + } + } + }); + } + + String nextNS = "nextNS"; + TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(nextNS).build()); + for(String table: tables) { + String srcTable = newNS+TableName.NAMESPACE_DELIM+table+"_clone3"; + TEST_UTIL.getHBaseAdmin().snapshot(table+"_snapshot4", srcTable); + String newTableName = nextNS+TableName.NAMESPACE_DELIM+table+"_clone4"; + TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot4", newTableName); + FSUtils.logFileSystemState(TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath() + , LOG); + int count = 0; + for(Result res: new HTable(TEST_UTIL.getConfiguration(), newTableName).getScanner(new + Scan())) { + assertEquals(currentKeys[count++], Bytes.toString(res.getRow())); + } + Assert.assertEquals(newTableName, currentKeys.length, count); + } + + } +} + diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java index 931cbfe..a0eddcc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; @@ -343,7 +344,7 @@ public class HFileReadWriteTest { columnDescriptor.setCompressionType(compression); columnDescriptor.setDataBlockEncoding(dataBlockEncoding); HRegionInfo regionInfo = new HRegionInfo(); - HTableDescriptor htd = new HTableDescriptor(TABLE_NAME); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE_NAME)); HRegion region = new HRegion(outputDir, null, fs, conf, regionInfo, htd, null); HStore store = new HStore(region, columnDescriptor, conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java index eb230a3..59bc2dc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.MultithreadedTestUtil; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; @@ -192,14 +193,14 @@ public class TestAtomicOperation extends HBaseTestCase { private void initHRegion (byte [] tableName, String callingMethod, int [] maxVersions, byte[] ... families) throws IOException { - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); int i=0; for(byte [] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family); hcd.setMaxVersions(maxVersions != null ? 
maxVersions[i++] : 1); htd.addFamily(hcd); } - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); Path path = new Path(DIR + callingMethod); if (fs.exists(path)) { if (!fs.delete(path, true)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java index db5668b..188deb7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; @@ -95,7 +96,7 @@ public class TestBlocksRead extends HBaseTestCase { */ private HRegion initHRegion(byte[] tableName, String callingMethod, HBaseConfiguration conf, String family) throws IOException { - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); HColumnDescriptor familyDesc; for (int i = 0; i < BLOOM_TYPE.length; i++) { BloomType bloomType = BLOOM_TYPE[i]; @@ -105,7 +106,7 @@ public class TestBlocksRead extends HBaseTestCase { htd.addFamily(familyDesc); } - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); Path path = new Path(DIR + callingMethod); HRegion r = HRegion.createHRegion(info, path, conf, htd); blockCache = new CacheConfig(conf).getBlockCache(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java index 4169b80..93c900c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.util.Bytes; @@ -51,7 +52,7 @@ public class TestBlocksScanned extends HBaseTestCase { super.setUp(); TEST_UTIL = new HBaseTestingUtility(); - TESTTABLEDESC = new HTableDescriptor(TABLE); + TESTTABLEDESC = new HTableDescriptor(TableName.valueOf(TABLE)); TESTTABLEDESC.addFamily( new HColumnDescriptor(FAMILY) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index 2f49989..ea37a38 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import 
org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; @@ -152,7 +153,7 @@ public class TestCacheOnWriteInSchema { HColumnDescriptor hcd = new HColumnDescriptor(family); hcd.setBloomFilterType(BloomType.ROWCOL); cowType.modifyFamilySchema(hcd); - HTableDescriptor htd = new HTableDescriptor(table); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table)); htd.addFamily(hcd); // Create a store based on the schema @@ -161,7 +162,7 @@ public class TestCacheOnWriteInSchema { Path logdir = new Path(DIR, logName); fs.delete(logdir, true); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); hlog = HLogFactory.createHLog(fs, basedir, logName, conf); region = new HRegion(basedir, hlog, fs, conf, info, htd, null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java index d1d56ea..89e8936 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java @@ -52,14 +52,14 @@ public class TestColumnSeeking { public void testDuplicateVersions() throws IOException { String family = "Family"; byte[] familyBytes = Bytes.toBytes("Family"); - String table = "TestDuplicateVersions"; + TableName table = TableName.valueOf("TestDuplicateVersions"); HColumnDescriptor hcd = new HColumnDescriptor(familyBytes).setMaxVersions(1000); hcd.setMaxVersions(3); HTableDescriptor htd = new HTableDescriptor(table); htd.addFamily(hcd); - HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false); + HRegionInfo info = new HRegionInfo(table, null, null, false); HRegion region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL .getConfiguration(), htd); @@ -166,14 +166,15 @@ public class TestColumnSeeking { public void testReseeking() throws IOException { String family = "Family"; byte[] familyBytes = Bytes.toBytes("Family"); - String table = "TestSingleVersions"; + TableName table = + TableName.valueOf("TestSingleVersions"); HTableDescriptor htd = new HTableDescriptor(table); HColumnDescriptor hcd = new HColumnDescriptor(family); hcd.setMaxVersions(3); htd.addFamily(hcd); - HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false); + HRegionInfo info = new HRegionInfo(table, null, null, false); HRegion region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL .getConfiguration(), htd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java index 6d2378e..807751a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java @@ -28,13 +28,13 @@ import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.LargeTests; import 
org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.util.Bytes; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -123,7 +123,8 @@ public class TestCompactionState { final CompactionState expectedState, boolean singleFamily) throws IOException, InterruptedException { // Create a table with regions - byte [] table = Bytes.toBytes(tableName); + TableName table = + TableName.valueOf(tableName); byte [] family = Bytes.toBytes("family"); byte [][] families = {family, Bytes.add(family, Bytes.toBytes("2")), Bytes.add(family, Bytes.toBytes("3"))}; @@ -139,24 +140,24 @@ public class TestCompactionState { HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); if (expectedState == CompactionState.MINOR) { if (singleFamily) { - admin.compact(table, family); + admin.compact(table.getName(), family); } else { - admin.compact(table); + admin.compact(table.getName()); } } else { if (singleFamily) { - admin.majorCompact(table, family); + admin.majorCompact(table.getName(), family); } else { - admin.majorCompact(table); + admin.majorCompact(table.getName()); } } long curt = System.currentTimeMillis(); long waitTime = 5000; long endt = curt + waitTime; - CompactionState state = admin.getCompactionState(table); + CompactionState state = admin.getCompactionState(table.getName()); while (state == CompactionState.NONE && curt < endt) { Thread.sleep(10); - state = admin.getCompactionState(table); + state = admin.getCompactionState(table.getName()); curt = System.currentTimeMillis(); } // Now, should have the right compaction state, @@ -168,10 +169,10 @@ public class TestCompactionState { } } else { // Wait until the compaction is done - state = admin.getCompactionState(table); + state = admin.getCompactionState(table.getName()); while (state != CompactionState.NONE && curt < endt) { Thread.sleep(10); - state = admin.getCompactionState(table); + state = admin.getCompactionState(table.getName()); } // Now, compaction should be done. 
assertEquals(CompactionState.NONE, state); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java index c009682..11d7c10 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; @@ -42,6 +43,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPoli import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; import org.junit.After; import org.junit.experimental.categories.Category; @@ -87,14 +89,14 @@ public class TestDefaultCompactSelection extends TestCase { fs.delete(logdir, true); - HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes("table")); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table"))); htd.addFamily(hcd); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); hlog = HLogFactory.createHLog(fs, basedir, logName, conf); region = HRegion.createHRegion(info, basedir, conf, htd); HRegion.closeHRegion(region); - Path tableDir = new Path(basedir, Bytes.toString(htd.getName())); + Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName()); region = new HRegion(tableDir, hlog, fs, conf, info, htd, null); store = new HStore(region, hcd, conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index 911b7ad..7cf6567 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -34,6 +34,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Chore; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -87,7 +88,8 @@ public class TestEndToEndSplitTransaction { @Test public void testMasterOpsWhileSplitting() throws Exception { - byte[] tableName = Bytes.toBytes("TestSplit"); + TableName tableName = + TableName.valueOf("TestSplit"); byte[] familyName = Bytes.toBytes("fam"); HTable ht = TEST_UTIL.createTable(tableName, familyName); TEST_UTIL.loadTable(ht, familyName); @@ -146,7 +148,7 @@ public class TestEndToEndSplitTransaction { * attempt to locate the region and perform a get and scan * @return True if successful, False otherwise. 
*/ - private boolean test(HConnection con, byte[] tableName, byte[] row, + private boolean test(HConnection con, TableName tableName, byte[] row, HRegionServer server) { // not using HTable to avoid timeouts and retries try { @@ -173,7 +175,8 @@ public class TestEndToEndSplitTransaction { @Test public void testFromClientSideWhileSplitting() throws Throwable { LOG.info("Starting testFromClientSideWhileSplitting"); - final byte[] TABLENAME = Bytes.toBytes("testFromClientSideWhileSplitting"); + final TableName TABLENAME = + TableName.valueOf("testFromClientSideWhileSplitting"); final byte[] FAMILY = Bytes.toBytes("family"); //SplitTransaction will update the meta table by offlining the parent region, and adding info @@ -206,13 +209,14 @@ public class TestEndToEndSplitTransaction { static class RegionSplitter extends Thread { Throwable ex; HTable table; - byte[] tableName, family; + TableName tableName; + byte[] family; HBaseAdmin admin; HRegionServer rs; RegionSplitter(HTable table) throws IOException { this.table = table; - this.tableName = table.getTableName(); + this.tableName = table.getName(); this.family = table.getTableDescriptor().getFamiliesKeys().iterator().next(); admin = TEST_UTIL.getHBaseAdmin(); rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); @@ -281,10 +285,10 @@ public class TestEndToEndSplitTransaction { */ static class RegionChecker extends Chore { Configuration conf; - byte[] tableName; + TableName tableName; Throwable ex; - RegionChecker(Configuration conf, Stoppable stopper, byte[] tableName) { + RegionChecker(Configuration conf, Stoppable stopper, TableName tableName) { super("RegionChecker", 10, stopper); this.conf = conf; this.tableName = tableName; @@ -411,7 +415,7 @@ public class TestEndToEndSplitTransaction { long start = System.currentTimeMillis(); log("blocking until region is split:" + Bytes.toStringBinary(regionName)); HRegionInfo daughterA = null, daughterB = null; - HTable metaTable = new HTable(conf, HConstants.META_TABLE_NAME); + HTable metaTable = new HTable(conf, TableName.META_TABLE_NAME); try { while (System.currentTimeMillis() - start < timeout) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java index b547097..cf94ec1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java @@ -180,7 +180,7 @@ public class TestFSErrorsExposed { byte[] fam = Bytes.toBytes("fam"); HBaseAdmin admin = new HBaseAdmin(util.getConfiguration()); - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(fam) .setMaxVersions(1) .setBlockCacheEnabled(false) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index f762c12..c05f4e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -26,6 +26,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import 
org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -72,11 +73,11 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { try { // Write rows for three tables 'A', 'B', and 'C'. for (char c = 'A'; c < 'D'; c++) { - HTableDescriptor htd = new HTableDescriptor("" + c); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("" + c)); final int last = 128; final int interval = 2; for (int i = 0; i <= last; i += interval) { - HRegionInfo hri = new HRegionInfo(htd.getName(), + HRegionInfo hri = new HRegionInfo(htd.getTableName(), i == 0? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i), i == last? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i + interval)); @@ -105,8 +106,9 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { findRow(mr, 'C', 46, 46); findRow(mr, 'C', 43, 42); // Now delete 'C' and make sure I don't get entries from 'B'. - byte [] firstRowInC = HRegionInfo.createRegionName(Bytes.toBytes("" + 'C'), - HConstants.EMPTY_BYTE_ARRAY, HConstants.ZEROES, false); + byte [] firstRowInC = HRegionInfo.createRegionName( + TableName.valueOf("" + 'C'), + HConstants.EMPTY_BYTE_ARRAY, HConstants.ZEROES, false); Scan scan = new Scan(firstRowInC); s = mr.getScanner(scan); try { @@ -151,10 +153,11 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { private byte [] findRow(final HRegion mr, final char table, final int rowToFind, final int answer) throws IOException { - byte [] tableb = Bytes.toBytes("" + table); + TableName tableb = TableName.valueOf("" + table); // Find the row. byte [] tofindBytes = Bytes.toBytes((short)rowToFind); - byte [] metaKey = HRegionInfo.createRegionName(tableb, tofindBytes, + byte [] metaKey = HRegionInfo.createRegionName( + tableb, tofindBytes, HConstants.NINES, false); LOG.info("find=" + new String(metaKey)); Result r = mr.getClosestRowBefore(metaKey); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 2da8b05..88d6c3d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -50,6 +50,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -253,7 +254,8 @@ public class TestHRegion extends HBaseTestCase { public void testSkipRecoveredEditsReplay() throws Exception { String method = "testSkipRecoveredEditsReplay"; - byte[] tableName = Bytes.toBytes(method); + TableName tableName = + TableName.valueOf(method); byte[] family = Bytes.toBytes("family"); this.region = initHRegion(tableName, method, conf, family); try { @@ -303,7 +305,8 @@ public class TestHRegion extends HBaseTestCase { public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception { String method = "testSkipRecoveredEditsReplaySomeIgnored"; - byte[] tableName = Bytes.toBytes(method); + TableName tableName = + TableName.valueOf(method); byte[] family = Bytes.toBytes("family"); this.region = initHRegion(tableName, method, conf, family); try { @@ -396,7 +399,8 @@ 
public class TestHRegion extends HBaseTestCase { @Test public void testRecoveredEditsReplayCompaction() throws Exception { String method = "testRecoveredEditsReplayCompaction"; - byte[] tableName = Bytes.toBytes(method); + TableName tableName = + TableName.valueOf(method); byte[] family = Bytes.toBytes("family"); this.region = initHRegion(tableName, method, conf, family); try { @@ -462,7 +466,7 @@ public class TestHRegion extends HBaseTestCase { HTableDescriptor htd = region.getTableDesc(); HRegionInfo info = region.getRegionInfo(); region.close(); - region = HRegion.openHRegion(conf, fs, regiondir.getParent().getParent(), info, htd, null); + region = HRegion.openHRegion(conf, fs, new Path(DIR+method),info, htd, null); //now check whether we have only one store file, the compacted one Collection sfs = region.getStore(family).getStorefiles(); @@ -1698,7 +1702,7 @@ public class TestHRegion extends HBaseTestCase { public void stestGet_Root() throws IOException { //Setting up region String method = this.getName(); - this.region = initHRegion(HConstants.ROOT_TABLE_NAME, + this.region = initHRegion(TableName.ROOT_TABLE_NAME, method, conf, HConstants.CATALOG_FAMILY); try { //Add to memstore @@ -3319,9 +3323,9 @@ public class TestHRegion extends HBaseTestCase { .setMaxVersions(Integer.MAX_VALUE) .setBloomFilterType(BloomType.ROWCOL); - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); htd.addFamily(hcd); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); Path path = new Path(DIR + "testBloomFilterSize"); this.region = HRegion.createHRegion(info, path, conf, htd); try { @@ -3378,9 +3382,9 @@ public class TestHRegion extends HBaseTestCase { HColumnDescriptor hcd = new HColumnDescriptor(FAMILY) .setMaxVersions(Integer.MAX_VALUE) .setBloomFilterType(BloomType.ROWCOL); - HTableDescriptor htd = new HTableDescriptor(TABLE); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE)); htd.addFamily(hcd); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); Path path = new Path(DIR + "testAllColumnsWithBloomFilter"); this.region = HRegion.createHRegion(info, path, conf, htd); try { @@ -3428,9 +3432,9 @@ public class TestHRegion extends HBaseTestCase { .setMaxVersions(Integer.MAX_VALUE) .setBloomFilterType(BloomType.ROWCOL); - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); htd.addFamily(hcd); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); Path path = new Path(DIR + "TestDeleteRowWithBloomFilter"); this.region = HRegion.createHRegion(info, path, conf, htd); try { @@ -3486,7 +3490,7 @@ public class TestHRegion extends HBaseTestCase { ht.put(put); HRegion firstRegion = htu.getHBaseCluster(). 
- getRegions(Bytes.toBytes(this.getName())).get(0); + getRegions(TableName.valueOf(this.getName())).get(0); firstRegion.flushcache(); HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution(); @@ -3532,9 +3536,9 @@ public class TestHRegion extends HBaseTestCase { try { FileSystem fs = Mockito.mock(FileSystem.class); Mockito.when(fs.exists((Path) Mockito.anyObject())).thenThrow(new IOException()); - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); htd.addFamily(new HColumnDescriptor("cf")); - info = new HRegionInfo(htd.getName(), HConstants.EMPTY_BYTE_ARRAY, + info = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, false); Path path = new Path(DIR + "testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization"); region = HRegion.newHRegion(path, null, fs, conf, info, htd, null); @@ -3564,10 +3568,10 @@ public class TestHRegion extends HBaseTestCase { Path rootDir = new Path(DIR + "testRegionInfoFileCreation"); Configuration conf = HBaseConfiguration.create(this.conf); - HTableDescriptor htd = new HTableDescriptor("testtb"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testtb")); htd.addFamily(new HColumnDescriptor("cf")); - HRegionInfo hri = new HRegionInfo(htd.getName()); + HRegionInfo hri = new HRegionInfo(htd.getTableName()); // Create a region and skip the initialization (like CreateTableHandler) HRegion region = HRegion.createHRegion(hri, rootDir, conf, htd, null, false, true); @@ -3909,12 +3913,12 @@ public class TestHRegion extends HBaseTestCase { Durability mutationDurability, long timeout, boolean expectAppend, final boolean expectSync, final boolean expectSyncFromLogSyncer) throws Exception { method = method + "_" + tableDurability.name() + "_" + mutationDurability.name(); - byte[] tableName = Bytes.toBytes(method); + TableName tableName = TableName.valueOf(method); byte[] family = Bytes.toBytes("family"); Path logDir = new Path(new Path(DIR + method), "log"); HLog hlog = HLogFactory.createHLog(fs, logDir, UUID.randomUUID().toString(), conf); final HLog log = spy(hlog); - this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW, + this.region = initHRegion(tableName.getName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, method, conf, false, tableDurability, log, new byte[][] {family}); @@ -4060,6 +4064,20 @@ public class TestHRegion extends HBaseTestCase { * @throws IOException * @return A region on which you must call {@link HRegion#closeHRegion(HRegion)} when done. */ + public static HRegion initHRegion (TableName tableName, String callingMethod, + Configuration conf, byte [] ... families) + throws IOException{ + return initHRegion(tableName.getName(), null, null, callingMethod, conf, false, families); + } + + /** + * @param tableName + * @param callingMethod + * @param conf + * @param families + * @throws IOException + * @return A region on which you must call {@link HRegion#closeHRegion(HRegion)} when done. + */ public static HRegion initHRegion (byte [] tableName, String callingMethod, Configuration conf, byte [] ... families) throws IOException{ @@ -4103,7 +4121,7 @@ public class TestHRegion extends HBaseTestCase { String callingMethod, Configuration conf, boolean isReadOnly, Durability durability, HLog hlog, byte[]... 
families) throws IOException { - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); htd.setReadOnly(isReadOnly); for(byte [] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family); @@ -4112,7 +4130,7 @@ public class TestHRegion extends HBaseTestCase { htd.addFamily(hcd); } htd.setDurability(durability); - HRegionInfo info = new HRegionInfo(htd.getName(), startKey, stopKey, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false); Path path = new Path(DIR + callingMethod); FileSystem fs = FileSystem.get(conf); if (fs.exists(path)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java index 044aa02..7870cae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java @@ -27,8 +27,6 @@ import java.io.IOException; import java.net.URI; import java.util.Collection; -import javax.management.RuntimeErrorException; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -38,17 +36,15 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.SmallTests; -import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.util.Progressable; import org.junit.Test; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.junit.experimental.categories.Category; -import junit.framework.TestCase; @Category(SmallTests.class) public class TestHRegionFileSystem { @@ -62,8 +58,9 @@ public class TestHRegionFileSystem { Configuration conf = TEST_UTIL.getConfiguration(); // Create a Region - HRegionInfo hri = new HRegionInfo(Bytes.toBytes("TestTable")); - HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri); + HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestTable")); + HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, + FSUtils.getTableDir(rootDir, hri.getTableName()), hri); // Verify if the region is on disk Path regionDir = regionFs.getRegionDir(); @@ -74,11 +71,13 @@ public class TestHRegionFileSystem { assertEquals(hri, hriVerify); // Open the region - regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, rootDir, hri, false); + regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, + FSUtils.getTableDir(rootDir, hri.getTableName()), hri, false); assertEquals(regionDir, regionFs.getRegionDir()); // Delete the region - HRegionFileSystem.deleteRegionFromFileSystem(conf, fs, rootDir, hri); + HRegionFileSystem.deleteRegionFromFileSystem(conf, fs, + FSUtils.getTableDir(rootDir, hri.getTableName()), hri); assertFalse("The region folder should be removed", fs.exists(regionDir)); fs.delete(rootDir, true); @@ -91,7 +90,7 @@ public class TestHRegionFileSystem { Configuration conf = TEST_UTIL.getConfiguration(); // Create a Region - HRegionInfo hri = new 
HRegionInfo(Bytes.toBytes("TestTable")); + HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestTable")); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri); assertTrue(fs.exists(regionFs.getRegionDir())); @@ -216,7 +215,7 @@ public class TestHRegionFileSystem { // Create a Region String familyName = "cf"; - HRegionInfo hri = new HRegionInfo(Bytes.toBytes("TestTable")); + HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestTable")); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri); // New region, no store files diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java index 8e06d7d..340e229 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java @@ -27,8 +27,8 @@ import static org.junit.Assert.fail; import java.io.IOException; import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionInfo; @@ -82,9 +82,9 @@ public class TestHRegionInfo { @Test public void testCreateHRegionInfoName() throws Exception { String tableName = "tablename"; - final byte [] tn = Bytes.toBytes(tableName); + final TableName tn = TableName.valueOf(tableName); String startKey = "startkey"; - final byte [] sk = Bytes.toBytes(startKey); + final byte[] sk = Bytes.toBytes(startKey); String id = "id"; // old format region name @@ -105,9 +105,9 @@ public class TestHRegionInfo { @Test public void testContainsRange() { - HTableDescriptor tableDesc = new HTableDescriptor("testtable"); + HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf("testtable")); HRegionInfo hri = new HRegionInfo( - tableDesc.getName(), Bytes.toBytes("a"), Bytes.toBytes("g")); + tableDesc.getTableName(), Bytes.toBytes("a"), Bytes.toBytes("g")); // Single row range at start of region assertTrue(hri.containsRange(Bytes.toBytes("a"), Bytes.toBytes("a"))); // Fully contained range @@ -133,11 +133,11 @@ public class TestHRegionInfo { @Test public void testLastRegionCompare() { - HTableDescriptor tableDesc = new HTableDescriptor("testtable"); + HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf("testtable")); HRegionInfo hrip = new HRegionInfo( - tableDesc.getName(), Bytes.toBytes("a"), new byte[0]); + tableDesc.getTableName(), Bytes.toBytes("a"), new byte[0]); HRegionInfo hric = new HRegionInfo( - tableDesc.getName(), Bytes.toBytes("a"), Bytes.toBytes("b")); + tableDesc.getTableName(), Bytes.toBytes("a"), Bytes.toBytes("b")); assertTrue(hrip.compareTo(hric) > 0); } @@ -148,7 +148,7 @@ public class TestHRegionInfo { @Test public void testComparator() { - byte[] tablename = Bytes.toBytes("comparatorTablename"); + TableName tablename = TableName.valueOf("comparatorTablename"); byte[] empty = new byte[0]; HRegionInfo older = new HRegionInfo(tablename, empty, empty, false, 0L); HRegionInfo newer = new HRegionInfo(tablename, empty, empty, false, 1L); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java index fb2f7a1..0b4110a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; @@ -64,7 +65,7 @@ public class TestHRegionOnCluster { HMaster master = cluster.getMaster(); // Create table - HTableDescriptor desc = new HTableDescriptor(TABLENAME); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLENAME)); desc.addFamily(new HColumnDescriptor(FAMILY)); HBaseAdmin hbaseAdmin = TEST_UTIL.getHBaseAdmin(); hbaseAdmin.createTable(desc); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index 084b991..379bf49 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -144,7 +144,7 @@ public class TestHRegionServerBulkLoad { // bulk load HFiles final HConnection conn = UTIL.getHBaseAdmin().getConnection(); - byte[] tbl = Bytes.toBytes(tableName); + TableName tbl = TableName.valueOf(tableName); RegionServerCallable callable = new RegionServerCallable(conn, tbl, Bytes.toBytes("aaa")) { @Override @@ -251,7 +251,7 @@ public class TestHRegionServerBulkLoad { private void setupTable(String table, int cfs) throws IOException { try { LOG.info("Creating table " + table); - HTableDescriptor htd = new HTableDescriptor(table); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table)); for (int i = 0; i < 10; i++) { htd.addFamily(new HColumnDescriptor(family(i))); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java index 69b6827..5c543a8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -92,7 +93,7 @@ public class TestJoinedScanners { byte [][] families = {cf_essential, cf_joined}; byte[] tableName = Bytes.toBytes(this.getClass().getSimpleName()); - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); for(byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family); hcd.setDataBlockEncoding(blockEncoding); @@ -177,13 +178,13 @@ public class TestJoinedScanners { private static HRegion initHRegion(byte[] 
tableName, byte[] startKey, byte[] stopKey, String callingMethod, Configuration conf, byte[]... families) throws IOException { - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); for(byte [] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family); hcd.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF); htd.addFamily(hcd); } - HRegionInfo info = new HRegionInfo(htd.getName(), startKey, stopKey, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false); Path path = new Path(DIR + callingMethod); FileSystem fs = FileSystem.get(conf); if (fs.exists(path)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java index 86a30c8..cff6488 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java @@ -35,7 +35,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -500,7 +499,7 @@ public class TestMemStore extends TestCase { for (int k = start; k <= end; k++) { byte [] kk = Bytes.toBytes(k); byte [] row = - Bytes.toBytes(".META.,table," + Bytes.toString(kk) + ",1," + k); + Bytes.toBytes(TableName.META_TABLE_NAME+",table," + Bytes.toString(kk) + ",1," + k); KeyValue key = new KeyValue(row, CONTENTS, BASIC, System.currentTimeMillis(), Bytes.toBytes(CONTENTSTR + k)); @@ -517,7 +516,7 @@ public class TestMemStore extends TestCase { System.out.println(kv); byte [] b = kv.getRow(); // Hardcoded offsets into String - String str = Bytes.toString(b, 13, 4); + String str = Bytes.toString(b, TableName.META_TABLE_NAME.getName().length+7, 4); byte [] bb = Bytes.toBytes(index); String bbStr = Bytes.toString(bb); assertEquals(str, bbStr); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestParallelPut.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestParallelPut.java index 9fae1ca..722c2ee 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestParallelPut.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestParallelPut.java @@ -19,50 +19,20 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; -import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread; -import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; -import 
org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.filter.BinaryComparator; -import org.apache.hadoop.hbase.filter.ColumnCountGetFilter; -import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; -import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.filter.FilterList; -import org.apache.hadoop.hbase.filter.NullComparator; -import org.apache.hadoop.hbase.filter.PrefixFilter; -import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; -import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; -import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; -import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; -import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.PairOfSameType; -import org.apache.hadoop.hbase.util.Threads; -import org.junit.Test; - -import com.google.common.collect.Lists; + import org.junit.experimental.categories.Category; @@ -184,11 +154,11 @@ public class TestParallelPut extends HBaseTestCase { private void initHRegion(byte [] tableName, String callingMethod, Configuration conf, byte [] ... families) throws IOException{ - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); for(byte [] family : families) { htd.addFamily(new HColumnDescriptor(family)); } - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); Path path = new Path(DIR + callingMethod); if (fs.exists(path)) { if (!fs.delete(path, true)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java index 49a7da5..4c634ae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.protobuf.ResponseConverter; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest; @@ -91,10 +92,10 @@ public class TestRSStatusServlet { @Test public void testWithRegions() throws IOException, ServiceException { - HTableDescriptor htd = new HTableDescriptor("mytable"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("mytable")); List regions = Lists.newArrayList( - new HRegionInfo(htd.getName(), Bytes.toBytes("a"), Bytes.toBytes("d")), - new HRegionInfo(htd.getName(), Bytes.toBytes("d"), Bytes.toBytes("z")) + new HRegionInfo(htd.getTableName(), Bytes.toBytes("a"), Bytes.toBytes("d")), + new HRegionInfo(htd.getTableName(), Bytes.toBytes("d"), Bytes.toBytes("z")) ); Mockito.doReturn(ResponseConverter.buildGetOnlineRegionResponse( regions)).when(rs).getOnlineRegion((RpcController)Mockito.any(), diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java index fc2760c..f959e0b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.client.HTable; @@ -52,7 +53,8 @@ public class TestRegionFavoredNodes { private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static HTable table; - private static final byte[] TABLE_NAME = Bytes.toBytes("table"); + private static final TableName TABLE_NAME = + TableName.valueOf("table"); private static final byte[] COLUMN_FAMILY = Bytes.toBytes("family"); private static final int FAVORED_NODES_NUM = 3; private static final int REGION_SERVERS = 6; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java index f8ff58e..9329d76 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java @@ -29,6 +29,7 @@ import java.util.List; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -346,7 +347,8 @@ public class TestRegionMergeTransaction { @Test public void testMeregedRegionBoundary() { - byte[] tableName = Bytes.toBytes("testMeregedRegionBoundary"); + TableName tableName = + TableName.valueOf("testMeregedRegionBoundary"); byte[] a = Bytes.toBytes("a"); byte[] b = Bytes.toBytes("b"); byte[] z = Bytes.toBytes("z"); @@ -354,19 +356,19 @@ public class TestRegionMergeTransaction { HRegionInfo r2 = new HRegionInfo(tableName, a, z); HRegionInfo m = RegionMergeTransaction.getMergedRegionInfo(r1, r2); assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) - && Bytes.equals(m.getEndKey(), r1.getEndKey())); + && Bytes.equals(m.getEndKey(), r1.getEndKey())); r1 = new HRegionInfo(tableName, null, a); r2 = new HRegionInfo(tableName, a, z); m = RegionMergeTransaction.getMergedRegionInfo(r1, r2); assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) - && Bytes.equals(m.getEndKey(), r2.getEndKey())); + && Bytes.equals(m.getEndKey(), r2.getEndKey())); r1 = new HRegionInfo(tableName, null, a); r2 = new HRegionInfo(tableName, z, null); m = RegionMergeTransaction.getMergedRegionInfo(r1, r2); assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) - && Bytes.equals(m.getEndKey(), r2.getEndKey())); + && Bytes.equals(m.getEndKey(), r2.getEndKey())); r1 = new HRegionInfo(tableName, a, z); r2 = new HRegionInfo(tableName, z, null); @@ -396,10 +398,10 @@ public class TestRegionMergeTransaction { final byte[] startrow, final byte[] endrow) throws IOException { // Make a region with start and end keys. 
- HTableDescriptor htd = new HTableDescriptor("table"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table")); HColumnDescriptor hcd = new HColumnDescriptor(CF); htd.addFamily(hcd); - HRegionInfo hri = new HRegionInfo(htd.getName(), startrow, endrow); + HRegionInfo hri = new HRegionInfo(htd.getTableName(), startrow, endrow); HRegion a = HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd); HRegion.closeHRegion(a); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index 32feff4..5e3aa7d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -31,6 +31,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -52,6 +53,7 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; @@ -107,7 +109,8 @@ public class TestRegionMergeTransactionOnCluster { @Test public void testWholesomeMerge() throws Exception { LOG.info("Starting testWholesomeMerge"); - final byte[] tableName = Bytes.toBytes("testWholesomeMerge"); + final TableName tableName = + TableName.valueOf("testWholesomeMerge"); // Create table and load data. HTable table = createTableAndLoadData(master, tableName); @@ -149,7 +152,8 @@ public class TestRegionMergeTransactionOnCluster { LOG.info("Starting testCleanMergeReference"); admin.enableCatalogJanitor(false); try { - final byte[] tableName = Bytes.toBytes("testCleanMergeReference"); + final TableName tableName = + TableName.valueOf("testCleanMergeReference"); // Create table and load data. 
HTable table = createTableAndLoadData(master, tableName); // Merge 1st and 2nd region @@ -160,10 +164,10 @@ public class TestRegionMergeTransactionOnCluster { List<Pair<HRegionInfo, ServerName>> tableRegions = MetaReader .getTableRegionsAndLocations(master.getCatalogTracker(), - Bytes.toString(tableName)); + tableName); HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst(); HTableDescriptor tableDescritor = master.getTableDescriptors().get( - Bytes.toString(tableName)); + tableName); Result mergedRegionResult = MetaReader.getRegionResult( master.getCatalogTracker(), mergedRegionInfo.getRegionName()); @@ -181,7 +185,7 @@ public class TestRegionMergeTransactionOnCluster { FileSystem fs = master.getMasterFileSystem().getFileSystem(); Path rootDir = master.getMasterFileSystem().getRootDir(); - Path tabledir = new Path(rootDir, mergedRegionInfo.getTableNameAsString()); + Path tabledir = FSUtils.getTableDir(rootDir, mergedRegionInfo.getTableName()); Path regionAdir = new Path(tabledir, regionA.getEncodedName()); Path regionBdir = new Path(tabledir, regionB.getEncodedName()); assertTrue(fs.exists(regionAdir)); @@ -228,7 +232,7 @@ public class TestRegionMergeTransactionOnCluster { @Test public void testMerge() throws Exception { LOG.info("Starting testMerge"); - final byte[] tableName = Bytes.toBytes("testMerge"); + final TableName tableName = TableName.valueOf("testMerge"); try { // Create table and load data. @@ -272,7 +276,7 @@ public class TestRegionMergeTransactionOnCluster { } private PairOfSameType<HRegionInfo> mergeRegionsAndVerifyRegionNum( - HMaster master, byte[] tablename, + HMaster master, TableName tablename, int regionAnum, int regionBnum, int expectedRegionNum) throws Exception { PairOfSameType<HRegionInfo> mergedRegions = requestMergeRegion(master, tablename, regionAnum, regionBnum); @@ -281,11 +285,11 @@ public class TestRegionMergeTransactionOnCluster { } private PairOfSameType<HRegionInfo> requestMergeRegion( - HMaster master, byte[] tablename, + HMaster master, TableName tablename, int regionAnum, int regionBnum) throws Exception { List<Pair<HRegionInfo, ServerName>> tableRegions = MetaReader .getTableRegionsAndLocations(master.getCatalogTracker(), - Bytes.toString(tablename)); + tablename); HRegionInfo regionA = tableRegions.get(regionAnum).getFirst(); HRegionInfo regionB = tableRegions.get(regionBnum).getFirst(); TEST_UTIL.getHBaseAdmin().mergeRegions( @@ -294,14 +298,14 @@ public class TestRegionMergeTransactionOnCluster { return new PairOfSameType<HRegionInfo>(regionA, regionB); } - private void waitAndVerifyRegionNum(HMaster master, byte[] tablename, + private void waitAndVerifyRegionNum(HMaster master, TableName tablename, int expectedRegionNum) throws Exception { List<Pair<HRegionInfo, ServerName>> tableRegionsInMeta; List<HRegionInfo> tableRegionsInMaster; long timeout = System.currentTimeMillis() + waitTime; while (System.currentTimeMillis() < timeout) { tableRegionsInMeta = MetaReader.getTableRegionsAndLocations( - master.getCatalogTracker(), Bytes.toString(tablename)); + master.getCatalogTracker(), tablename); tableRegionsInMaster = master.getAssignmentManager().getRegionStates() .getRegionsOfTable(tablename); if (tableRegionsInMeta.size() == expectedRegionNum @@ -312,17 +316,17 @@ } tableRegionsInMeta = MetaReader.getTableRegionsAndLocations( - master.getCatalogTracker(), Bytes.toString(tablename)); + master.getCatalogTracker(), tablename); LOG.info("Regions after merge:" + Joiner.on(',').join(tableRegionsInMeta)); assertEquals(expectedRegionNum, tableRegionsInMeta.size()); } - private HTable createTableAndLoadData(HMaster master, byte[] tablename) + private
HTable createTableAndLoadData(HMaster master, TableName tablename) throws Exception { return createTableAndLoadData(master, tablename, INITIAL_REGION_NUM); } - private HTable createTableAndLoadData(HMaster master, byte[] tablename, + private HTable createTableAndLoadData(HMaster master, TableName tablename, int numRegions) throws Exception { assertTrue("ROWSIZE must > numregions:" + numRegions, ROWSIZE > numRegions); byte[][] splitRows = new byte[numRegions - 1][]; @@ -339,14 +343,14 @@ public class TestRegionMergeTransactionOnCluster { List<Pair<HRegionInfo, ServerName>> tableRegions; while (System.currentTimeMillis() < timeout) { tableRegions = MetaReader.getTableRegionsAndLocations( - master.getCatalogTracker(), Bytes.toString(tablename)); + master.getCatalogTracker(), tablename); if (tableRegions.size() == numRegions) break; Thread.sleep(250); } tableRegions = MetaReader.getTableRegionsAndLocations( - master.getCatalogTracker(), Bytes.toString(tablename)); + master.getCatalogTracker(), tablename); LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions)); assertEquals(numRegions, tableRegions.size()); return table; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java index 5b1fe58..1189406 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java @@ -62,7 +62,7 @@ public class TestRegionServerNoMaster { @BeforeClass public static void before() throws Exception { HTU.startMiniCluster(NB_SERVERS); - final byte[] tableName = Bytes.toBytes(TestRegionServerNoMaster.class.getName()); + final byte[] tableName = Bytes.toBytes(TestRegionServerNoMaster.class.getSimpleName()); // Create table then get the single region for our new table.
table = HTU.createTable(tableName, HConstants.CATALOG_FAMILY); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java index d163319..78491f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java @@ -28,6 +28,7 @@ import java.util.List; import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -46,7 +47,7 @@ public class TestRegionSplitPolicy { private HTableDescriptor htd; private HRegion mockRegion; private TreeMap stores; - private static final byte [] TABLENAME = new byte [] {'t'}; + private static final TableName TABLENAME = TableName.valueOf("t"); @Before public void setupMocks() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java index 1304f00..0397170 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java @@ -56,10 +56,10 @@ public class TestResettingCounters { byte [][] rows = new byte [numRows][]; for (int i=0; i regions = cluster.getRegions(tableName); HRegionInfo hri = getAndCheckSingleTableRegion(regions); int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName()); final HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); - insertData(tableName, admin, t); + insertData(tableName.getName(), admin, t); t.close(); // Turn off balancer so it doesn't cut in and mess up our placements. @@ -216,10 +217,11 @@ public class TestSplitTransactionOnCluster { RegionStates regionStates = cluster.getMaster().getAssignmentManager().getRegionStates(); Map<String, RegionState> rit = regionStates.getRegionsInTransition(); - for (int i=0; rit.containsKey(hri.getTableNameAsString()) && i<100; i++) { + for (int i=0; rit.containsKey(hri.getTableName()) && i<100; i++) { Thread.sleep(100); } - assertFalse("region still in transition", rit.containsKey(rit.containsKey(hri.getTableNameAsString()))); + assertFalse("region still in transition", rit.containsKey( + rit.containsKey(hri.getTableName()))); List<HRegion> onlineRegions = regionServer.getOnlineRegions(tableName); // Region server side split is successful.
@@ -460,7 +462,8 @@ public class TestSplitTransactionOnCluster { public void testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles() throws Exception { Configuration conf = TESTING_UTIL.getConfiguration(); ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TESTING_UTIL); - String userTableName = "testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles"; + TableName userTableName = + TableName.valueOf("testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles"); HTableDescriptor htd = new HTableDescriptor(userTableName); HColumnDescriptor hcd = new HColumnDescriptor("col"); htd.addFamily(hcd); @@ -473,16 +476,16 @@ public class TestSplitTransactionOnCluster { String val = "Val" + i; p.add("col".getBytes(), "ql".getBytes(), val.getBytes()); table.put(p); - admin.flush(userTableName); + admin.flush(userTableName.getName()); Delete d = new Delete(row.getBytes()); // Do a normal delete table.delete(d); - admin.flush(userTableName); + admin.flush(userTableName.getName()); } - admin.majorCompact(userTableName); + admin.majorCompact(userTableName.getName()); List regionsOfTable = TESTING_UTIL.getMiniHBaseCluster() .getMaster().getAssignmentManager().getRegionStates() - .getRegionsOfTable(userTableName.getBytes()); + .getRegionsOfTable(userTableName); HRegionInfo hRegionInfo = regionsOfTable.get(0); Put p = new Put("row6".getBytes()); p.add("col".getBytes(), "ql".getBytes(), "val".getBytes()); @@ -493,17 +496,17 @@ public class TestSplitTransactionOnCluster { p = new Put("row8".getBytes()); p.add("col".getBytes(), "ql".getBytes(), "val".getBytes()); table.put(p); - admin.flush(userTableName); + admin.flush(userTableName.getName()); admin.split(hRegionInfo.getRegionName(), "row7".getBytes()); regionsOfTable = TESTING_UTIL.getMiniHBaseCluster().getMaster() .getAssignmentManager().getRegionStates() - .getRegionsOfTable(userTableName.getBytes()); + .getRegionsOfTable(userTableName); while (regionsOfTable.size() != 2) { Thread.sleep(2000); regionsOfTable = TESTING_UTIL.getMiniHBaseCluster().getMaster() .getAssignmentManager().getRegionStates() - .getRegionsOfTable(userTableName.getBytes()); + .getRegionsOfTable(userTableName); } Assert.assertEquals(2, regionsOfTable.size()); Scan s = new Scan(); @@ -701,22 +704,22 @@ public class TestSplitTransactionOnCluster { @Test(timeout = 60000) public void testTableExistsIfTheSpecifiedTableRegionIsSplitParent() throws Exception { ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TESTING_UTIL); - final byte[] tableName = - Bytes.toBytes("testTableExistsIfTheSpecifiedTableRegionIsSplitParent"); + final TableName tableName = + TableName.valueOf("testTableExistsIfTheSpecifiedTableRegionIsSplitParent"); // Create table then get the single region for our new table. - HTable t = createTableAndWait(tableName, Bytes.toBytes("cf")); + HTable t = createTableAndWait(tableName.getName(), Bytes.toBytes("cf")); List regions = null; try { regions = cluster.getRegions(tableName); int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName()); HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); - insertData(tableName, admin, t); + insertData(tableName.getName(), admin, t); // Turn off balancer so it doesn't cut in and mess up our placements. admin.setBalancerRunning(false, true); // Turn off the meta scanner so it don't remove parent on us. 
cluster.getMaster().setCatalogJanitorEnabled(false); boolean tableExists = MetaReader.tableExists(regionServer.getCatalogTracker(), - Bytes.toString(tableName)); + tableName); assertEquals("The specified table should present.", true, tableExists); final HRegion region = findSplittableRegion(regions); assertTrue("not able to find a splittable region", region != null); @@ -728,7 +731,7 @@ public class TestSplitTransactionOnCluster { } tableExists = MetaReader.tableExists(regionServer.getCatalogTracker(), - Bytes.toString(tableName)); + tableName); assertEquals("The specified table should present.", true, tableExists); } finally { if (regions != null) { @@ -766,9 +769,10 @@ public class TestSplitTransactionOnCluster { @Test public void testSplitRegionWithNoStoreFiles() throws Exception { - final byte[] tableName = Bytes.toBytes("testSplitRegionWithNoStoreFiles"); + final TableName tableName = + TableName.valueOf("testSplitRegionWithNoStoreFiles"); // Create table then get the single region for our new table. - createTableAndWait(tableName, HConstants.CATALOG_FAMILY); + createTableAndWait(tableName.getName(), HConstants.CATALOG_FAMILY); List regions = cluster.getRegions(tableName); HRegionInfo hri = getAndCheckSingleTableRegion(regions); ensureTableRegionNotOnSameServerAsMeta(admin, hri); @@ -900,7 +904,7 @@ public class TestSplitTransactionOnCluster { @Override void transitionZKNode(Server server, RegionServerServices services, HRegion a, HRegion b) throws IOException { - if (this.currentRegion.getRegionInfo().getTableNameAsString() + if (this.currentRegion.getRegionInfo().getTableName().getNameAsString() .equals("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack")) { try { if (!secondSplit){ @@ -912,14 +916,14 @@ public class TestSplitTransactionOnCluster { } super.transitionZKNode(server, services, a, b); - if (this.currentRegion.getRegionInfo().getTableNameAsString() + if (this.currentRegion.getRegionInfo().getTableName().getNameAsString() .equals("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack")) { firstSplitCompleted = true; } } @Override public boolean rollback(Server server, RegionServerServices services) throws IOException { - if (this.currentRegion.getRegionInfo().getTableNameAsString() + if (this.currentRegion.getRegionInfo().getTableName().getNameAsString() .equals("testShouldFailSplitIfZNodeDoesNotExistDueToPrevRollBack")) { if(secondSplit){ super.rollback(server, services); @@ -983,7 +987,7 @@ public class TestSplitTransactionOnCluster { } private void removeDaughterFromMeta(final byte [] regionName) throws IOException { - HTable metaTable = new HTable(TESTING_UTIL.getConfiguration(), HConstants.META_TABLE_NAME); + HTable metaTable = new HTable(TESTING_UTIL.getConfiguration(), TableName.META_TABLE_NAME); try { Delete d = new Delete(regionName); LOG.info("Deleted " + Bytes.toString(regionName)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java index f41cf62..52b3d3d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java @@ -38,7 +38,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FilterFileSystem; import 
org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; @@ -51,13 +50,13 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.monitoring.MonitoredTask; -import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; @@ -67,14 +66,13 @@ import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; import org.apache.hadoop.util.Progressable; import org.junit.experimental.categories.Category; import org.mockito.Mockito; -import com.google.common.base.Joiner; - /** * Test class for the Store */ @@ -141,7 +139,7 @@ public class TestStore extends TestCase { private void init(String methodName, Configuration conf, HColumnDescriptor hcd) throws IOException { - HTableDescriptor htd = new HTableDescriptor(table); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table)); init(methodName, conf, htd, hcd); } @@ -149,6 +147,7 @@ public class TestStore extends TestCase { HColumnDescriptor hcd) throws IOException { //Setting up a Store Path basedir = new Path(DIR+methodName); + Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName()); String logName = "logs"; Path logdir = new Path(basedir, logName); @@ -157,9 +156,9 @@ public class TestStore extends TestCase { fs.delete(logdir, true); htd.addFamily(hcd); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); HLog hlog = HLogFactory.createHLog(fs, basedir, logName, conf); - HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null); + HRegion region = new HRegion(tableDir, hlog, fs, conf, info, htd, null); store = new HStore(region, hcd, conf); } @@ -841,7 +840,7 @@ public class TestStore extends TestCase { // HTD overrides XML. 
--anyValue; - HTableDescriptor htd = new HTableDescriptor(table); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table)); HColumnDescriptor hcd = new HColumnDescriptor(family); htd.setConfiguration(CONFIG_KEY, Long.toString(anyValue)); init(getName() + "-htd", conf, htd, hcd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java index 26bdb77..d09ad8b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java @@ -33,6 +33,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -89,9 +90,10 @@ public class TestStoreFile extends HBaseTestCase { * @throws Exception */ public void testBasicHalfMapFile() throws Exception { - final HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testBasicHalfMapFileTb")); + final HRegionInfo hri = + new HRegionInfo(TableName.valueOf("testBasicHalfMapFileTb")); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem( - conf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri); + conf, fs, new Path(this.testDir, hri.getTableName().getNameAsString()), hri); StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs, 2 * 1024) .withFilePath(regionFs.createTempName()) @@ -138,9 +140,9 @@ public class TestStoreFile extends HBaseTestCase { * @throws IOException */ public void testReference() throws IOException { - final HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testReferenceTb")); + final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testReferenceTb")); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem( - conf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri); + conf, fs, new Path(this.testDir, hri.getTableName().getNameAsString()), hri); // Make a store file and write data to it. StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs, 8 * 1024) @@ -179,12 +181,12 @@ public class TestStoreFile extends HBaseTestCase { } public void testHFileLink() throws IOException { - final HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testHFileLinkTb")); + final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testHFileLinkTb")); // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/ Configuration testConf = new Configuration(this.conf); FSUtils.setRootDir(testConf, this.testDir); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem( - testConf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri); + testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTableName()), hri); // Make a store file and write data to it. StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs, 8 * 1024) @@ -224,9 +226,9 @@ public class TestStoreFile extends HBaseTestCase { FSUtils.setRootDir(testConf, this.testDir); // adding legal table name chars to verify regex handles it. 
- HRegionInfo hri = new HRegionInfo(Bytes.toBytes("_original-evil-name")); + HRegionInfo hri = new HRegionInfo(TableName.valueOf("_original-evil-name")); HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem( - testConf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri); + testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTableName()), hri); // Make a store file and write data to it. //// StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs, 8 * 1024) @@ -236,9 +238,10 @@ public class TestStoreFile extends HBaseTestCase { Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath()); // create link to store file. /clone/region//--
- HRegionInfo hriClone = new HRegionInfo(Bytes.toBytes("clone")); + HRegionInfo hriClone = new HRegionInfo(TableName.valueOf("clone")); HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem( - testConf, fs, new Path(this.testDir, hri.getTableNameAsString()), hriClone); + testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTableName()), + hriClone); Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY); HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName()); Path linkFilePath = new Path(dstPath, @@ -297,10 +300,12 @@ public class TestStoreFile extends HBaseTestCase { KeyValue midKV = KeyValue.createKeyValueFromKey(midkey); byte [] midRow = midKV.getRow(); // Create top split. - HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTableName(), null, midRow); + HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTableName(), + null, midRow); Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true); // Create bottom split. - HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTableName(), midRow, null); + HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTableName(), + midRow, null); Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false); // Make readers on top and bottom. StoreFile.Reader top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java index 7b78f56..4d9def7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java @@ -46,7 +46,7 @@ public class TestWideScanner extends HBaseTestCase { static byte[][] COLUMNS = { A, B, C }; static final Random rng = new Random(); static final HTableDescriptor TESTTABLEDESC = - new HTableDescriptor("testwidescan"); + new HTableDescriptor(TableName.valueOf("testwidescan")); static { for (byte[] cfName : new byte[][] { A, B, C }) { TESTTABLEDESC.addFamily(new HColumnDescriptor(cfName) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java index 731897a..6a7d72f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java @@ -25,6 +25,7 @@ import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -57,7 +58,7 @@ public class TestCloseRegionHandler { static final Log LOG = LogFactory.getLog(TestCloseRegionHandler.class); private final static HBaseTestingUtility HTU = new HBaseTestingUtility(); private static final HTableDescriptor TEST_HTD = - new HTableDescriptor("TestCloseRegionHandler"); + new HTableDescriptor(TableName.valueOf("TestCloseRegionHandler")); private HRegionInfo TEST_HRI; private int testIndex = 0; @@ -76,7 +77,7 @@ public class TestCloseRegionHandler { */ @Before public void setupHRI() { - 
TEST_HRI = new HRegionInfo(TEST_HTD.getName(), + TEST_HRI = new HRegionInfo(TEST_HTD.getTableName(), Bytes.toBytes(testIndex), Bytes.toBytes(testIndex + 1)); testIndex++; @@ -95,7 +96,7 @@ public class TestCloseRegionHandler { final RegionServerServices rss = new MockRegionServerServices(); HTableDescriptor htd = TEST_HTD; final HRegionInfo hri = - new HRegionInfo(htd.getName(), HConstants.EMPTY_END_ROW, + new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW, HConstants.EMPTY_END_ROW); HRegion region = HRegion.createHRegion(hri, HTU.getDataTestDir(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java index 538cad1..c23c658 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java @@ -56,7 +56,7 @@ public class TestOpenRegionHandler { @BeforeClass public static void before() throws Exception { HTU.startMiniZKCluster(); - TEST_HTD = new HTableDescriptor("TestOpenRegionHandler.java"); + TEST_HTD = new HTableDescriptor(TableName.valueOf("TestOpenRegionHandler.java")); } @AfterClass public static void after() throws IOException { @@ -71,7 +71,7 @@ public class TestOpenRegionHandler { */ @Before public void setupHRI() { - TEST_HRI = new HRegionInfo(TEST_HTD.getName(), + TEST_HRI = new HRegionInfo(TEST_HTD.getTableName(), Bytes.toBytes(testIndex), Bytes.toBytes(testIndex + 1)); testIndex++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java index 579d249..8479c8e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; @@ -224,7 +225,7 @@ public final class HLogPerformanceEvaluation extends Configured implements Tool } private static HTableDescriptor createHTableDescriptor(final int numFamilies) { - HTableDescriptor htd = new HTableDescriptor(TABLE_NAME); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE_NAME)); for (int i = 0; i < numFamilies; ++i) { HColumnDescriptor colDef = new HColumnDescriptor(FAMILY_PREFIX + i); htd.addFamily(colDef); @@ -296,7 +297,7 @@ public final class HLogPerformanceEvaluation extends Configured implements Tool private HRegion openRegion(final FileSystem fs, final Path dir, final HTableDescriptor htd, final HLog hlog) throws IOException { // Initialize HRegion - HRegionInfo regionInfo = new HRegionInfo(htd.getName()); + HRegionInfo regionInfo = new HRegionInfo(htd.getTableName()); return HRegion.createHRegion(regionInfo, dir, getConf(), htd, hlog); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java index de63feb..b2cc947 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -150,11 +151,11 @@ public class TestDurability { // lifted from TestAtomicOperation private HRegion createHRegion (byte [] tableName, String callingMethod, HLog log, boolean isDeferredLogFlush) throws IOException { - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); htd.setDeferredLogFlush(isDeferredLogFlush); HColumnDescriptor hcd = new HColumnDescriptor(FAMILY); htd.addFamily(hcd); - HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); + HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); Path path = new Path(DIR + callingMethod); if (FS.exists(path)) { if (!FS.delete(path, true)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java index 6ecb95d..5b148a7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java @@ -158,14 +158,15 @@ public class TestHLog { @Test public void testSplit() throws IOException { - final byte [] tableName = Bytes.toBytes(getName()); - final byte [] rowName = tableName; + final TableName tableName = + TableName.valueOf(getName()); + final byte [] rowName = tableName.getName(); Path logdir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME); HLog log = HLogFactory.createHLog(fs, hbaseDir, HConstants.HREGION_LOGDIR_NAME, conf); final int howmany = 3; HRegionInfo[] infos = new HRegionInfo[3]; - Path tabledir = new Path(hbaseDir, getName()); + Path tabledir = FSUtils.getTableDir(hbaseDir, tableName); fs.mkdirs(tabledir); for(int i = 0; i < howmany; i++) { infos[i] = new HRegionInfo(tableName, @@ -213,11 +214,12 @@ public class TestHLog { */ @Test public void Broken_testSync() throws Exception { - byte [] bytes = Bytes.toBytes(getName()); + TableName tableName = + TableName.valueOf(getName()); // First verify that using streams all works. 
Path p = new Path(dir, getName() + ".fsdos"); FSDataOutputStream out = fs.create(p); - out.write(bytes); + out.write(tableName.getName()); Method syncMethod = null; try { syncMethod = out.getClass().getMethod("hflush", new Class []{}); @@ -234,7 +236,7 @@ public class TestHLog { assertTrue(in.available() > 0); byte [] buffer = new byte [1024]; int read = in.read(buffer); - assertEquals(bytes.length, read); + assertEquals(tableName.getName().length, read); out.close(); in.close(); @@ -244,15 +246,15 @@ public class TestHLog { HLog.Reader reader = null; try { - HRegionInfo info = new HRegionInfo(bytes, + HRegionInfo info = new HRegionInfo(tableName, null,null, false); HTableDescriptor htd = new HTableDescriptor(); - htd.addFamily(new HColumnDescriptor(bytes)); + htd.addFamily(new HColumnDescriptor(tableName.getName())); for (int i = 0; i < total; i++) { WALEdit kvs = new WALEdit(); - kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes)); - wal.append(info, bytes, kvs, System.currentTimeMillis(), htd); + kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName())); + wal.append(info, tableName, kvs, System.currentTimeMillis(), htd); } // Now call sync and try reading. Opening a Reader before you sync just // gives you EOFE. @@ -269,8 +271,8 @@ public class TestHLog { // that has had a sync done on it. for (int i = 0; i < total; i++) { WALEdit kvs = new WALEdit(); - kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes)); - wal.append(info, bytes, kvs, System.currentTimeMillis(), htd); + kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName())); + wal.append(info, tableName, kvs, System.currentTimeMillis(), htd); } reader = HLogFactory.createReader(fs, walPath, conf); count = 0; @@ -288,8 +290,8 @@ public class TestHLog { final byte [] value = new byte[1025 * 1024]; // Make a 1M value. for (int i = 0; i < total; i++) { WALEdit kvs = new WALEdit(); - kvs.add(new KeyValue(Bytes.toBytes(i), bytes, value)); - wal.append(info, bytes, kvs, System.currentTimeMillis(), htd); + kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), value)); + wal.append(info, tableName, kvs, System.currentTimeMillis(), htd); } // Now I should have written out lots of blocks. Sync then read. 
wal.sync(); @@ -381,7 +383,8 @@ public class TestHLog { */ @Test (timeout=300000) public void testAppendClose() throws Exception { - byte [] tableName = Bytes.toBytes(getName()); + TableName tableName = + TableName.valueOf(getName()); HRegionInfo regioninfo = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false); @@ -390,11 +393,11 @@ public class TestHLog { final int total = 20; HTableDescriptor htd = new HTableDescriptor(); - htd.addFamily(new HColumnDescriptor(tableName)); + htd.addFamily(new HColumnDescriptor(tableName.getName())); for (int i = 0; i < total; i++) { WALEdit kvs = new WALEdit(); - kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName)); + kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName())); wal.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd); } // Now call sync to send the data to HDFS datanodes @@ -502,7 +505,8 @@ public class TestHLog { @Test public void testEditAdd() throws IOException { final int COL_COUNT = 10; - final byte [] tableName = Bytes.toBytes("tablename"); + final TableName tableName = + TableName.valueOf("tablename"); final byte [] row = Bytes.toBytes("row"); HLog.Reader reader = null; HLog log = null; @@ -539,7 +543,7 @@ public class TestHLog { HLogKey key = entry.getKey(); WALEdit val = entry.getEdit(); assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName())); - assertTrue(Bytes.equals(tableName, key.getTablename())); + assertTrue(tableName.equals(key.getTablename())); KeyValue kv = val.getKeyValues().get(0); assertTrue(Bytes.equals(row, kv.getRow())); assertEquals((byte)(i + '0'), kv.getValue()[0]); @@ -561,7 +565,8 @@ public class TestHLog { @Test public void testAppend() throws IOException { final int COL_COUNT = 10; - final byte [] tableName = Bytes.toBytes("tablename"); + final TableName tableName = + TableName.valueOf("tablename"); final byte [] row = Bytes.toBytes("row"); Reader reader = null; HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf); @@ -593,7 +598,7 @@ public class TestHLog { for (KeyValue val : entry.getEdit().getKeyValues()) { assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName())); - assertTrue(Bytes.equals(tableName, entry.getKey().getTablename())); + assertTrue(tableName.equals(entry.getKey().getTablename())); assertTrue(Bytes.equals(row, val.getRow())); assertEquals((byte)(idx + '0'), val.getValue()[0]); System.out.println(entry.getKey() + " " + val); @@ -616,7 +621,8 @@ public class TestHLog { @Test public void testVisitors() throws Exception { final int COL_COUNT = 10; - final byte [] tableName = Bytes.toBytes("tablename"); + final TableName tableName = + TableName.valueOf("tablename"); final byte [] row = Bytes.toBytes("row"); HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf); try { @@ -651,8 +657,10 @@ public class TestHLog { @Test public void testLogCleaning() throws Exception { LOG.info("testLogCleaning"); - final byte [] tableName = Bytes.toBytes("testLogCleaning"); - final byte [] tableName2 = Bytes.toBytes("testLogCleaning2"); + final TableName tableName = + TableName.valueOf("testLogCleaning"); + final TableName tableName2 = + TableName.valueOf("testLogCleaning2"); HLog log = HLogFactory.createHLog(fs, hbaseDir, getName(), conf); @@ -749,7 +757,7 @@ public class TestHLog { } } - private void addEdits(HLog log, HRegionInfo hri, byte [] tableName, + private void addEdits(HLog log, HRegionInfo hri, TableName tableName, int times) throws 
IOException { HTableDescriptor htd = new HTableDescriptor(); htd.addFamily(new HColumnDescriptor("row")); @@ -771,7 +779,8 @@ public class TestHLog { public void testReadLegacyLog() throws IOException { final int columnCount = 5; final int recordCount = 5; - final byte[] tableName = Bytes.toBytes("tablename"); + final TableName tableName = + TableName.valueOf("tablename"); final byte[] row = Bytes.toBytes("row"); long timestamp = System.currentTimeMillis(); Path path = new Path(dir, "temphlog"); @@ -809,7 +818,7 @@ public class TestHLog { assertNotNull(entry); assertEquals(columnCount, entry.getEdit().size()); assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName()); - assertArrayEquals(tableName, entry.getKey().getTablename()); + assertEquals(tableName, entry.getKey().getTablename()); int idx = 0; for (KeyValue val : entry.getEdit().getKeyValues()) { assertTrue(Bytes.equals(row, val.getRow())); @@ -854,7 +863,8 @@ public class TestHLog { private void doRead(boolean withTrailer) throws IOException { final int columnCount = 5; final int recordCount = 5; - final byte[] tableName = Bytes.toBytes("tablename"); + final TableName tableName = + TableName.valueOf("tablename"); final byte[] row = Bytes.toBytes("row"); long timestamp = System.currentTimeMillis(); Path path = new Path(dir, "temphlog"); @@ -896,7 +906,7 @@ public class TestHLog { assertNotNull(entry); assertEquals(columnCount, entry.getEdit().size()); assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName()); - assertArrayEquals(tableName, entry.getKey().getTablename()); + assertEquals(tableName, entry.getKey().getTablename()); int idx = 0; for (KeyValue val : entry.getEdit().getKeyValues()) { assertTrue(Bytes.equals(row, val.getRow())); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogFiltering.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogFiltering.java index df5a6a6..3368273 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogFiltering.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogFiltering.java @@ -28,6 +28,7 @@ import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.client.Delete; @@ -55,7 +56,8 @@ public class TestHLogFiltering { private static final int NUM_MASTERS = 1; private static final int NUM_RS = 4; - private static final byte[] TABLE_NAME = Bytes.toBytes("TestHLogFiltering"); + private static final TableName TABLE_NAME = + TableName.valueOf("TestHLogFiltering"); private static final byte[] CF1 = Bytes.toBytes("MyCF1"); private static final byte[] CF2 = Bytes.toBytes("MyCF2"); private static final byte[][] FAMILIES = { CF1, CF2 }; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java index a099b26..5b68f9f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java @@ -42,7 +42,8 @@ import static org.mockito.Mockito.mock; @Category(SmallTests.class) public class TestHLogMethods { private static final byte[] TEST_REGION = 
Bytes.toBytes("test_region");; - private static final byte[] TEST_TABLE = Bytes.toBytes("test_table"); + private static final TableName TEST_TABLE = + TableName.valueOf("test_table"); private final HBaseTestingUtility util = new HBaseTestingUtility(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java index ef7f85d..9a6da61 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java @@ -42,6 +42,11 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.hbase.TableName; +import org.apache.log4j.Level; +import org.apache.hadoop.hdfs.server.datanode.DataNode; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -68,12 +73,8 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException; -import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.ipc.RemoteException; -import org.apache.log4j.Level; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; @@ -114,14 +115,15 @@ public class TestHLogSplit { private static final int NUM_WRITERS = 10; private static final int ENTRIES = 10; // entries per writer per region - private static final byte[] TABLE_NAME = "t1".getBytes(); + private static final TableName TABLE_NAME = + TableName.valueOf("t1"); private static final byte[] FAMILY = "f1".getBytes(); private static final byte[] QUALIFIER = "q1".getBytes(); private static final byte[] VALUE = "v1".getBytes(); private static final String HLOG_FILE_PREFIX = "hlog.dat."; private static List REGIONS = new ArrayList(); private static final String HBASE_SKIP_ERRORS = "hbase.hlog.split.skip.errors"; - private static final Path TABLEDIR = new Path(HBASEDIR, Bytes.toString(TABLE_NAME)); + private static final Path TABLEDIR = FSUtils.getTableDir(HBASEDIR, TABLE_NAME); private static String ROBBER; private static String ZOMBIE; private static String [] GROUP = new String [] {"supergroup"}; @@ -336,14 +338,14 @@ public class TestHLogSplit { public void testRecoveredEditsPathForMeta() throws IOException { FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration()); byte [] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(); - Path tdir = new Path(HBASEDIR, Bytes.toString(HConstants.META_TABLE_NAME)); + Path tdir = FSUtils.getTableDir(HBASEDIR, TableName.META_TABLE_NAME); Path regiondir = new Path(tdir, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()); fs.mkdirs(regiondir); long now = System.currentTimeMillis(); HLog.Entry entry = new HLog.Entry(new HLogKey(encoded, - HConstants.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID), + TableName.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID), new 
WALEdit()); Path p = HLogSplitter.getRegionSplitEditsPath(fs, entry, HBASEDIR, true); String parentOfParent = p.getParent().getParent().getName(); @@ -358,14 +360,14 @@ public class TestHLogSplit { public void testOldRecoveredEditsFileSidelined() throws IOException { FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration()); byte [] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(); - Path tdir = new Path(HBASEDIR, Bytes.toString(HConstants.META_TABLE_NAME)); + Path tdir = FSUtils.getTableDir(HBASEDIR, TableName.META_TABLE_NAME); Path regiondir = new Path(tdir, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()); fs.mkdirs(regiondir); long now = System.currentTimeMillis(); HLog.Entry entry = new HLog.Entry(new HLogKey(encoded, - HConstants.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID), + TableName.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID), new WALEdit()); Path parent = HLogUtil.getRegionDirRecoveredEditsDir(regiondir); assertEquals(parent.getName(), HConstants.RECOVERED_EDITS_DIR); @@ -767,9 +769,10 @@ public class TestHLogSplit { HLogSplitter.split(HBASEDIR, HLOGDIR, OLDLOGDIR, fs, conf); fs.rename(OLDLOGDIR, HLOGDIR); - Path firstSplitPath = new Path(HBASEDIR, Bytes.toString(TABLE_NAME) + ".first"); - Path splitPath = new Path(HBASEDIR, Bytes.toString(TABLE_NAME)); - fs.rename(splitPath, firstSplitPath); + Path firstSplitPath = new Path(HBASEDIR, TABLE_NAME+ ".first"); + Path splitPath = new Path(HBASEDIR, TABLE_NAME.getNameAsString()); + fs.rename(splitPath, + firstSplitPath); fs.initialize(fs.getUri(), conf); HLogSplitter.split(HBASEDIR, HLOGDIR, OLDLOGDIR, fs, conf); @@ -1081,7 +1084,8 @@ public class TestHLogSplit { try { // put some entries in an HLog - byte [] tableName = Bytes.toBytes(this.getClass().getName()); + TableName tableName = + TableName.valueOf(this.getClass().getName()); HRegionInfo regioninfo = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); log = HLogFactory.createHLog(fs, HBASEDIR, logName, conf); @@ -1089,7 +1093,7 @@ public class TestHLogSplit { final int total = 20; for (int i = 0; i < total; i++) { WALEdit kvs = new WALEdit(); - kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName)); + kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName())); HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor("column")); log.append(regioninfo, tableName, kvs, System.currentTimeMillis(), htd); @@ -1151,7 +1155,7 @@ public class TestHLogSplit { if (stop.get()) { return; } - Path tableDir = new Path(HBASEDIR, new String(TABLE_NAME)); + Path tableDir = FSUtils.getTableDir(HBASEDIR, TABLE_NAME); Path regionDir = new Path(tableDir, REGIONS.get(0)); Path recoveredEdits = new Path(regionDir, HConstants.RECOVERED_EDITS_DIR); String region = "juliet"; @@ -1166,7 +1170,7 @@ public class TestHLogSplit { fs.mkdirs(new Path(tableDir, region)); HLog.Writer writer = HLogFactory.createWriter(fs, julietLog, conf); - appendEntry(writer, "juliet".getBytes(), ("juliet").getBytes(), + appendEntry(writer, TableName.valueOf("juliet"), ("juliet").getBytes(), ("r").getBytes(), FAMILY, QUALIFIER, VALUE, 0); writer.close(); LOG.info("Juliet file creator: created file " + julietLog); @@ -1224,7 +1228,7 @@ public class TestHLogSplit { fs.initialize(fs.getUri(), conf); HLogSplitter.split(HBASEDIR, HLOGDIR, OLDLOGDIR, fs, conf); - Path tdir = HTableDescriptor.getTableDir(HBASEDIR, TABLE_NAME); + Path tdir = FSUtils.getTableDir(HBASEDIR, TABLE_NAME); 
assertFalse(fs.exists(tdir)); assertEquals(0, countHLog(fs.listStatus(OLDLOGDIR)[0].getPath(), fs, conf)); @@ -1362,9 +1366,9 @@ public class TestHLogSplit { return ws; } - private Path[] getLogForRegion(Path rootdir, byte[] table, String region) + private Path[] getLogForRegion(Path rootdir, TableName table, String region) throws IOException { - Path tdir = HTableDescriptor.getTableDir(rootdir, table); + Path tdir = FSUtils.getTableDir(rootdir, table); @SuppressWarnings("deprecation") Path editsdir = HLogUtil.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, Bytes.toString(region.getBytes()))); @@ -1475,7 +1479,7 @@ public class TestHLogSplit { } - public static long appendEntry(HLog.Writer writer, byte[] table, byte[] region, + public static long appendEntry(HLog.Writer writer, TableName table, byte[] region, byte[] row, byte[] family, byte[] qualifier, byte[] value, long seq) throws IOException { @@ -1487,7 +1491,7 @@ public class TestHLogSplit { } private static HLog.Entry createTestEntry( - byte[] table, byte[] region, + TableName table, byte[] region, byte[] row, byte[] family, byte[] qualifier, byte[] value, long seq) { long time = System.nanoTime(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java index 88ad29d..e0ccc37 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java @@ -119,7 +119,7 @@ public class TestLogRollAbort { // When the META table can be opened, the region servers are running new HTable(TEST_UTIL.getConfiguration(), - HConstants.META_TABLE_NAME).close(); + TableName.META_TABLE_NAME).close(); // Create the test table and open it String tableName = this.getClass().getSimpleName(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java index 7b365b8..e67055e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; @@ -190,7 +191,7 @@ public class TestLogRolling { private void startAndWriteData() throws IOException, InterruptedException { // When the META table can be opened, the region servers are running - new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME); + new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME); this.server = cluster.getRegionServerThreads().get(0).getRegionServer(); this.log = server.getWAL(); @@ -330,7 +331,7 @@ public class TestLogRolling { // Create the test table and open it String tableName = getName(); - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc); @@ -426,14 +427,14 @@ public class TestLogRolling { 
fs.getDefaultReplication() > 1); LOG.info("Replication=" + fs.getDefaultReplication()); // When the META table can be opened, the region servers are running - new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME); + new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME); this.server = cluster.getRegionServer(0); this.log = server.getWAL(); // Create the test table and open it String tableName = getName(); - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc); @@ -583,7 +584,7 @@ public class TestLogRolling { @Test public void testCompactionRecordDoesntBlockRolling() throws Exception { // When the META table can be opened, the region servers are running - new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME); + new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME); String tableName = getName(); HTable table = createTestTable(tableName); @@ -593,9 +594,11 @@ server = TEST_UTIL.getRSForFirstRegionInTable(Bytes.toBytes(tableName)); this.log = server.getWAL(); FSHLog fshLog = (FSHLog)log; - HRegion region = server.getOnlineRegions(table2.getTableName()).get(0); + HRegion region = server.getOnlineRegions(table2.getName()).get(0); Store s = region.getStore(HConstants.CATALOG_FAMILY); + // have to flush namespace to ensure it doesn't affect WAL tests + admin.flush(TableName.NAMESPACE_TABLE_NAME.getName()); // Put some stuff into table2, to make sure we have some files to compact. for (int i = 1; i <= 2; ++i) { @@ -641,7 +644,7 @@ public class TestLogRolling { private HTable createTestTable(String tableName) throws IOException { // Create the test table and open it - HTableDescriptor desc = new HTableDescriptor(tableName); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc); return new HTable(TEST_UTIL.getConfiguration(), tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java index eba84a1..65baff5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java @@ -122,7 +122,7 @@ public class TestLogRollingNoCluster { edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY)); this.wal.append(HRegionInfo.FIRST_META_REGIONINFO, - HTableDescriptor.META_TABLEDESC.getName(), + HTableDescriptor.META_TABLEDESC.getTableName(), edit, now, HTableDescriptor.META_TABLEDESC); } String msg = getName() + " finished"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java index 86047ad..14d09df 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java @@ -89,7 +89,7 @@ public class TestWALActionsListener { DummyWALActionsListener laterobserver = new DummyWALActionsListener(); HLog hlog = HLogFactory.createHLog(fs,
TEST_UTIL.getDataTestDir(), logName, conf, list, null); - HRegionInfo hri = new HRegionInfo(SOME_BYTES, + HRegionInfo hri = new HRegionInfo(TableName.valueOf(SOME_BYTES), SOME_BYTES, SOME_BYTES, false); for (int i = 0; i < 20; i++) { @@ -100,7 +100,7 @@ public class TestWALActionsListener { HTableDescriptor htd = new HTableDescriptor(); htd.addFamily(new HColumnDescriptor(b)); - hlog.append(hri, b, edit, 0, htd); + hlog.append(hri, TableName.valueOf(b), edit, 0, htd); if (i == 10) { hlog.registerWALActionsListener(laterobserver); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java index 8bc4a62..fcca9e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java @@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -150,8 +151,8 @@ public class TestWALReplay { */ @Test public void testReplayEditsAfterRegionMovedWithMultiCF() throws Exception { - final byte[] tableName = Bytes - .toBytes("testReplayEditsAfterRegionMovedWithMultiCF"); + final TableName tableName = + TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"); byte[] family1 = Bytes.toBytes("cf1"); byte[] family2 = Bytes.toBytes("cf2"); byte[] qualifier = Bytes.toBytes("q"); @@ -251,17 +252,17 @@ public class TestWALReplay { public void test2727() throws Exception { // Test being able to have > 1 set of edits in the recovered.edits directory. // Ensure edits are replayed properly. - final String tableNameStr = "test2727"; - HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr); - Path basedir = new Path(hbaseRootDir, tableNameStr); + final TableName tableName = + TableName.valueOf("test2727"); + HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName); + Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName); deleteDir(basedir); - HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr); + HTableDescriptor htd = createBasic3FamilyHTD(tableName); HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd); HRegion.closeHRegion(region2); - final byte [] tableName = Bytes.toBytes(tableNameStr); - final byte [] rowName = tableName; + final byte [] rowName = tableName.getName(); HLog wal1 = createWAL(this.conf); // Add 1k to each family. 
@@ -312,11 +313,12 @@ public class TestWALReplay { public void testRegionMadeOfBulkLoadedFilesOnly() throws IOException, SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { - final String tableNameStr = "testReplayEditsWrittenViaHRegion"; - final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr); - final Path basedir = new Path(this.hbaseRootDir, tableNameStr); + final TableName tableName = + TableName.valueOf("testReplayEditsWrittenViaHRegion"); + final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName); + final Path basedir = new Path(this.hbaseRootDir, tableName.getNameAsString()); deleteDir(basedir); - final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr); + final HTableDescriptor htd = createBasic3FamilyHTD(tableName); HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd); HRegion.closeHRegion(region2); @@ -326,7 +328,7 @@ public class TestWALReplay { HFile.Writer writer = HFile.getWriterFactoryNoCache(conf).withPath(fs, f).create(); byte [] family = htd.getFamilies().iterator().next().getName(); - byte [] row = Bytes.toBytes(tableNameStr); + byte [] row = tableName.getName(); writer.append(new KeyValue(row, family, family, row)); writer.close(); List > hfs= new ArrayList>(1); @@ -339,7 +341,7 @@ public class TestWALReplay { // Now 'crash' the region by stealing its wal final Configuration newConf = HBaseConfiguration.create(this.conf); User user = HBaseTestingUtility.getDifferentUser(newConf, - tableNameStr); + tableName.getNameAsString()); user.runAs(new PrivilegedExceptionAction() { public Object run() throws Exception { runWALSplit(newConf); @@ -371,13 +373,14 @@ public class TestWALReplay { public void testReplayEditsWrittenViaHRegion() throws IOException, SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { - final String tableNameStr = "testReplayEditsWrittenViaHRegion"; - final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr); - final Path basedir = new Path(this.hbaseRootDir, tableNameStr); + final TableName tableName = + TableName.valueOf("testReplayEditsWrittenViaHRegion"); + final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName); + final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName); deleteDir(basedir); - final byte[] rowName = Bytes.toBytes(tableNameStr); + final byte[] rowName = tableName.getName(); final int countPerFamily = 10; - final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr); + final HTableDescriptor htd = createBasic3FamilyHTD(tableName); HRegion region3 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd); HRegion.closeHRegion(region3); @@ -433,7 +436,7 @@ public class TestWALReplay { HBaseTestingUtility.setMaxRecoveryErrorCount(((FSHLog) wal2).getOutputStream(), 1); final Configuration newConf = HBaseConfiguration.create(this.conf); User user = HBaseTestingUtility.getDifferentUser(newConf, - tableNameStr); + tableName.getNameAsString()); user.runAs(new PrivilegedExceptionAction() { public Object run() throws Exception { runWALSplit(newConf); @@ -488,13 +491,14 @@ public class TestWALReplay { public void testReplayEditsAfterPartialFlush() throws IOException, SecurityException, IllegalArgumentException, NoSuchFieldException, IllegalAccessException, InterruptedException { - final String tableNameStr = "testReplayEditsWrittenViaHRegion"; - final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr); - final Path basedir = 
new Path(this.hbaseRootDir, tableNameStr); + final TableName tableName = + TableName.valueOf("testReplayEditsWrittenViaHRegion"); + final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName); + final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName); deleteDir(basedir); - final byte[] rowName = Bytes.toBytes(tableNameStr); + final byte[] rowName = tableName.getName(); final int countPerFamily = 10; - final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr); + final HTableDescriptor htd = createBasic3FamilyHTD(tableName); HRegion region3 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd); HRegion.closeHRegion(region3); @@ -578,11 +582,12 @@ public class TestWALReplay { */ @Test public void testReplayEditsAfterAbortingFlush() throws IOException { - final String tableNameStr = "testReplayEditsAfterAbortingFlush"; - final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr); - final Path basedir = new Path(this.hbaseRootDir, tableNameStr); + final TableName tableName = + TableName.valueOf("testReplayEditsAfterAbortingFlush"); + final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName); + final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName); deleteDir(basedir); - final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr); + final HTableDescriptor htd = createBasic3FamilyHTD(tableName); HRegion region3 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd); region3.close(); region3.getLog().closeAndDelete(); @@ -605,7 +610,7 @@ public class TestWALReplay { List families = new ArrayList( htd.getFamilies()); for (int i = 0; i < writtenRowCount; i++) { - Put put = new Put(Bytes.toBytes(tableNameStr + Integer.toString(i))); + Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i))); put.add(families.get(i % families.size()).getName(), Bytes.toBytes("q"), Bytes.toBytes("val")); region.put(put); @@ -629,7 +634,7 @@ public class TestWALReplay { // writing more data int moreRow = 10; for (int i = writtenRowCount; i < writtenRowCount + moreRow; i++) { - Put put = new Put(Bytes.toBytes(tableNameStr + Integer.toString(i))); + Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i))); put.add(families.get(i % families.size()).getName(), Bytes.toBytes("q"), Bytes.toBytes("val")); region.put(put); @@ -683,18 +688,18 @@ public class TestWALReplay { */ @Test public void testReplayEditsWrittenIntoWAL() throws Exception { - final String tableNameStr = "testReplayEditsWrittenIntoWAL"; - final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr); - final Path basedir = new Path(hbaseRootDir, tableNameStr); + final TableName tableName = + TableName.valueOf("testReplayEditsWrittenIntoWAL"); + final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName); + final Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName); deleteDir(basedir); - final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr); + final HTableDescriptor htd = createBasic3FamilyHTD(tableName); HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd); HRegion.closeHRegion(region2); final HLog wal = createWAL(this.conf); - final byte[] tableName = Bytes.toBytes(tableNameStr); - final byte[] rowName = tableName; + final byte[] rowName = tableName.getName(); final byte[] regionName = hri.getEncodedNameAsBytes(); // Add 1k to each family. 
@@ -776,13 +781,15 @@ public class TestWALReplay { @Test // the following test is for HBASE-6065 public void testSequentialEditLogSeqNum() throws IOException { - final String tableNameStr = "testSequentialEditLogSeqNum"; - final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr); - final Path basedir = new Path(this.hbaseRootDir, tableNameStr); + final TableName tableName = + TableName.valueOf("testSequentialEditLogSeqNum"); + final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName); + final Path basedir = + FSUtils.getTableDir(this.hbaseRootDir, tableName); deleteDir(basedir); - final byte[] rowName = Bytes.toBytes(tableNameStr); + final byte[] rowName = tableName.getName(); final int countPerFamily = 10; - final HTableDescriptor htd = createBasic1FamilyHTD(tableNameStr); + final HTableDescriptor htd = createBasic1FamilyHTD(tableName); // Mock the HLog MockHLog wal = createMockWAL(this.conf); @@ -814,8 +821,9 @@ public class TestWALReplay { FileStatus[] listStatus = this.fs.listStatus(wal.getDir()); HLogSplitter.splitLogFile(hbaseRootDir, listStatus[0], this.fs, this.conf, null, null, null); - FileStatus[] listStatus1 = this.fs.listStatus(new Path(hbaseRootDir + "/" - + tableNameStr + "/" + hri.getEncodedName() + "/recovered.edits")); + FileStatus[] listStatus1 = this.fs.listStatus( + new Path(FSUtils.getTableDir(hbaseRootDir, tableName), + new Path(hri.getEncodedName(), "recovered.edits"))); int editCount = 0; for (FileStatus fileStatus : listStatus1) { editCount = Integer.parseInt(fileStatus.getPath().getName()); @@ -842,7 +850,7 @@ public class TestWALReplay { } } - private HTableDescriptor createBasic1FamilyHTD(final String tableName) { + private HTableDescriptor createBasic1FamilyHTD(final TableName tableName) { HTableDescriptor htd = new HTableDescriptor(tableName); HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a")); htd.addFamily(a); @@ -878,7 +886,7 @@ public class TestWALReplay { } } - private void addWALEdits (final byte [] tableName, final HRegionInfo hri, + private void addWALEdits (final TableName tableName, final HRegionInfo hri, final byte [] rowName, final byte [] family, final int count, EnvironmentEdge ee, final HLog wal, final HTableDescriptor htd) throws IOException { @@ -910,8 +918,8 @@ public class TestWALReplay { * column families named 'a','b', and 'c'. * @param tableName Name of table to use when we create HTableDescriptor. 
*/ - private HRegionInfo createBasic3FamilyHRegionInfo(final String tableName) { - return new HRegionInfo(Bytes.toBytes(tableName), null, null, false); + private HRegionInfo createBasic3FamilyHRegionInfo(final TableName tableName) { + return new HRegionInfo(tableName, null, null, false); } /* @@ -945,7 +953,7 @@ public class TestWALReplay { return wal; } - private HTableDescriptor createBasic3FamilyHTD(final String tableName) { + private HTableDescriptor createBasic3FamilyHTD(final TableName tableName) { HTableDescriptor htd = new HTableDescriptor(tableName); HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a")); htd.addFamily(a); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java index ea887bd..86a91a1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java @@ -1,4 +1,4 @@ -/* +/** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file @@ -66,7 +66,7 @@ public class TestMasterReplication { private MiniZooKeeperCluster miniZK; private static final long SLEEP_TIME = 500; - private static final int NB_RETRIES = 10; + private static final int NB_RETRIES = 100; private static final byte[] tableName = Bytes.toBytes("test"); private static final byte[] famName = Bytes.toBytes("f"); @@ -121,7 +121,7 @@ public class TestMasterReplication { utility3.setZkCluster(miniZK); new ZooKeeperWatcher(conf3, "cluster3", null, true); - table = new HTableDescriptor(tableName); + table = new HTableDescriptor(TableName.valueOf(tableName)); HColumnDescriptor fam = new HColumnDescriptor(famName); fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); table.addFamily(fam); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java index cbce1b6..f3ffca7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java @@ -57,7 +57,7 @@ public class TestMultiSlaveReplication { private static HBaseTestingUtility utility2; private static HBaseTestingUtility utility3; private static final long SLEEP_TIME = 500; - private static final int NB_RETRIES = 10; + private static final int NB_RETRIES = 100; private static final byte[] tableName = Bytes.toBytes("test"); private static final byte[] famName = Bytes.toBytes("f"); @@ -105,7 +105,7 @@ public class TestMultiSlaveReplication { utility3.setZkCluster(miniZK); new ZooKeeperWatcher(conf3, "cluster3", null, true); - table = new HTableDescriptor(tableName); + table = new HTableDescriptor(TableName.valueOf(tableName)); HColumnDescriptor fam = new HColumnDescriptor(famName); fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); table.addFamily(fam); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java index 1791bd9..c28c6dc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; @@ -119,7 +120,7 @@ public class TestReplicationBase { utility1.startMiniCluster(2); utility2.startMiniCluster(2); - HTableDescriptor table = new HTableDescriptor(tableName); + HTableDescriptor table = new HTableDescriptor(TableName.valueOf(tableName)); HColumnDescriptor fam = new HColumnDescriptor(famName); fam.setMaxVersions(3); fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java index 759369e..5866a19 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java @@ -26,6 +26,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; @@ -33,7 +34,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; -import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; @@ -86,7 +86,8 @@ public class TestReplicationSource { KeyValue kv = new KeyValue(b,b,b); WALEdit edit = new WALEdit(); edit.add(kv); - HLogKey key = new HLogKey(b, b, 0, 0, HConstants.DEFAULT_CLUSTER_ID); + HLogKey key = new HLogKey(b, TableName.valueOf(b), 0, 0, + HConstants.DEFAULT_CLUSTER_ID); writer.append(new HLog.Entry(key, edit)); writer.sync(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationHLogReaderManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationHLogReaderManager.java index 670156e..c6b73d0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationHLogReaderManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationHLogReaderManager.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; @@ -59,7 +60,7 @@ public class TestReplicationHLogReaderManager { private static Path hbaseDir; private static FileSystem fs; private static MiniDFSCluster 
cluster; - private static final byte [] tableName = Bytes.toBytes("tablename"); + private static final TableName tableName = TableName.valueOf("tablename"); private static final byte [] family = Bytes.toBytes("column"); private static final byte [] qualifier = Bytes.toBytes("qualifier"); private static final HRegionInfo info = new HRegionInfo(tableName, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index f5c3c99..8efe0e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ClusterId; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -93,7 +94,8 @@ public class TestReplicationSourceManager { private static final byte[] f1 = Bytes.toBytes("f1"); - private static final byte[] test = Bytes.toBytes("test"); + private static final TableName test = + TableName.valueOf("test"); private static final String slaveId = "1"; @@ -152,7 +154,7 @@ public class TestReplicationSourceManager { col.setScope(HConstants.REPLICATION_SCOPE_LOCAL); htd.addFamily(col); - hri = new HRegionInfo(htd.getName(), r1, r2); + hri = new HRegionInfo(htd.getTableName(), r1, r2); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java index 4d27769..4a5b3ae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java @@ -44,6 +44,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -107,7 +108,7 @@ public class PerformanceEvaluation { protected static final HTableDescriptor TABLE_DESCRIPTOR; static { - TABLE_DESCRIPTOR = new HTableDescriptor(TABLE_NAME); + TABLE_DESCRIPTOR = new HTableDescriptor(TableName.valueOf(TABLE_NAME)); TABLE_DESCRIPTOR.addFamily(new HColumnDescriptor(FAMILY_NAME)); } @@ -448,7 +449,7 @@ public class PerformanceEvaluation { private boolean checkTable() throws IOException { HTableDescriptor tableDescriptor = getTableDescriptor(); RemoteAdmin admin = new RemoteAdmin(new Client(cluster), conf); - if (!admin.isTableAvailable(tableDescriptor.getName())) { + if (!admin.isTableAvailable(tableDescriptor.getTableName().getName())) { admin.createTable(tableDescriptor); return true; } @@ -1069,7 +1070,7 @@ public class PerformanceEvaluation { Test t = null; TestOptions options = new TestOptions(startRow, perClientRunRows, - totalRows, getTableDescriptor().getName(), rowsPerPut); + totalRows, getTableDescriptor().getTableName().getName(), rowsPerPut); try { Constructor 
constructor = cmd.getDeclaredConstructor( Configuration.class, TestOptions.class, Status.class); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java index 4fe7b6a..ea0ad9b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java @@ -66,7 +66,7 @@ public class TestGzipFilter { if (admin.tableExists(TABLE)) { return; } - HTableDescriptor htd = new HTableDescriptor(TABLE); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE)); htd.addFamily(new HColumnDescriptor(CFA)); admin.createTable(htd); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java index e14336c..2aa81bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java @@ -83,7 +83,7 @@ public class TestMultiRowResource { if (admin.tableExists(TABLE)) { return; } - HTableDescriptor htd = new HTableDescriptor(TABLE); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE)); htd.addFamily(new HColumnDescriptor(CFA)); htd.addFamily(new HColumnDescriptor(CFB)); admin.createTable(htd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java index 47f2f64..fd3a974 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestRowResource.java @@ -93,7 +93,7 @@ public class TestRowResource { if (admin.tableExists(TABLE)) { return; } - HTableDescriptor htd = new HTableDescriptor(TABLE); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE)); htd.addFamily(new HColumnDescriptor(CFA)); htd.addFamily(new HColumnDescriptor(CFB)); admin.createTable(htd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java index 50d5398..e932a80 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannerResource.java @@ -166,7 +166,7 @@ public class TestScannerResource { if (admin.tableExists(TABLE)) { return; } - HTableDescriptor htd = new HTableDescriptor(TABLE); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE)); htd.addFamily(new HColumnDescriptor(CFA)); htd.addFamily(new HColumnDescriptor(CFB)); admin.createTable(htd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java index 3444854..0fc97e8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java @@ -130,7 +130,7 @@ public class TestScannersWithFilters { REST_TEST_UTIL.getServletPort())); HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); if (!admin.tableExists(TABLE)) { - HTableDescriptor htd = new HTableDescriptor(TABLE); + HTableDescriptor htd = new 
HTableDescriptor(TableName.valueOf(TABLE)); htd.addFamily(new HColumnDescriptor(FAMILIES[0])); htd.addFamily(new HColumnDescriptor(FAMILIES[1])); admin.createTable(htd); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java index 700e663..0a7b9a6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java @@ -27,6 +27,7 @@ import javax.xml.bind.JAXBException; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.rest.client.Client; import org.apache.hadoop.hbase.rest.client.Cluster; import org.apache.hadoop.hbase.rest.client.Response; @@ -42,7 +43,7 @@ import org.junit.experimental.categories.Category; @Category(MediumTests.class) public class TestStatusResource { - private static final byte[] META_REGION_NAME = Bytes.toBytes(".META.,,1"); + private static final byte[] META_REGION_NAME = Bytes.toBytes(TableName.META_TABLE_NAME+",,1"); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static final HBaseRESTTestingUtility REST_TEST_UTIL = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java index c62e3ad..aac2ab7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java @@ -82,7 +82,7 @@ public class TestTableResource { if (admin.tableExists(TABLE)) { return; } - HTableDescriptor htd = new HTableDescriptor(TABLE); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE)); htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY)); admin.createTable(htd); HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java index 67fe29f..c7a14fe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java @@ -77,7 +77,7 @@ public class TestRemoteTable { REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration()); HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); if (!admin.tableExists(TABLE)) { - HTableDescriptor htd = new HTableDescriptor(TABLE); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE)); htd.addFamily(new HColumnDescriptor(COLUMN_1).setMaxVersions(3)); htd.addFamily(new HColumnDescriptor(COLUMN_2).setMaxVersions(3)); htd.addFamily(new HColumnDescriptor(COLUMN_3).setMaxVersions(3)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java index ff98a2a..a851ece 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java @@ -28,6 +28,7 @@ import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import 
org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Base64; import org.apache.hadoop.hbase.util.Bytes; @@ -42,8 +43,8 @@ public class TestStorageClusterStatusModel extends TestCase { "" + "" + - "" + ""; private static final String AS_PB = - "CjsKBXRlc3QxEOO6i+eeJBgAIIABKIAIMiMKCS1ST09ULSwsMBABGAEgACgAMAA4AUACSAFQAVgB" + - "YAFoAQpHCgV0ZXN0MhD+krHwniQYACCABCiACDIvChUuTUVUQS4sLDEyNDYwMDAwNDM3MjQQARgB" + - "IAAoADAAOAFAAkgBUAFYAWABaAEYAiAAKQAAAAAAAPA/"; + "Cj8KBXRlc3QxEOO6i+eeJBgAIIABKIAIMicKDWhiYXNlOnJvb3QsLDAQARgBIAAoADAAOAFAAkgB"+ + "UAFYAWABaAEKSwoFdGVzdDIQ/pKx8J4kGAAggAQogAgyMwoZaGJhc2U6bWV0YSwsMTI0NjAwMDA0"+ + "MzcyNBABGAEgACgAMAA4AUACSAFQAVgBYAFoARgCIAApAAAAAAAA8D8="; + private JAXBContext context; @@ -76,9 +78,9 @@ public class TestStorageClusterStatusModel extends TestCase { model.setRequests(0); model.setAverageLoad(1.0); model.addLiveNode("test1", 1245219839331L, 128, 1024) - .addRegion(Bytes.toBytes("-ROOT-,,0"), 1, 1, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1); + .addRegion(Bytes.toBytes(TableName.ROOT_TABLE_NAME+",,0"), 1, 1, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1); model.addLiveNode("test2", 1245239331198L, 512, 1024) - .addRegion(Bytes.toBytes(".META.,,1246000043724"),1, 1, 0, 0, 0, + .addRegion(Bytes.toBytes(TableName.META_TABLE_NAME+",,1246000043724"),1, 1, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1); return model; } @@ -119,7 +121,8 @@ public class TestStorageClusterStatusModel extends TestCase { Iterator regions = node.getRegions().iterator(); StorageClusterStatusModel.Node.Region region = regions.next(); - assertTrue(Bytes.toString(region.getName()).equals("-ROOT-,,0")); + assertTrue(Bytes.toString(region.getName()).equals( + TableName.ROOT_TABLE_NAME+",,0")); assertEquals(region.getStores(), 1); assertEquals(region.getStorefiles(), 1); assertEquals(region.getStorefileSizeMB(), 0); @@ -140,7 +143,8 @@ public class TestStorageClusterStatusModel extends TestCase { assertEquals(node.getMaxHeapSizeMB(), 1024); regions = node.getRegions().iterator(); region = regions.next(); - assertEquals(Bytes.toString(region.getName()), ".META.,,1246000043724"); + assertEquals(Bytes.toString(region.getName()), + TableName.META_TABLE_NAME+",,1246000043724"); assertEquals(region.getStores(), 1); assertEquals(region.getStorefiles(), 1); assertEquals(region.getStorefileSizeMB(), 0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java index eaa0736..12b8684 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java @@ -88,7 +88,7 @@ public class TestTableRegionModel extends TestCase { public void testGetName() { TableRegionModel model = buildTestModel(); String modelName = model.getName(); - HRegionInfo hri = new HRegionInfo(Bytes.toBytes(TABLE), + HRegionInfo hri = new HRegionInfo(TableName.valueOf(TABLE), START_KEY, END_KEY, false, ID); assertEquals(modelName, hri.getRegionNameAsString()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java index 426c11f..901d979 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java @@ -32,6 +32,7 @@ import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.client.HTable; @@ -61,7 +62,8 @@ public class TestAccessControlFilter { private static User LIMITED; private static User DENIED; - private static byte[] TABLE = Bytes.toBytes("testtable"); + private static TableName TABLE = + TableName.valueOf("testtable"); private static byte[] FAMILY = Bytes.toBytes("f1"); private static byte[] PRIVATE_COL = Bytes.toBytes("private"); private static byte[] PUBLIC_COL = Bytes.toBytes("public"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index a09f048..c50f1aa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -80,13 +81,10 @@ import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost; import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.security.access.AccessControlLists; -import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.Permission.Action; -import org.apache.hadoop.hbase.security.access.UserPermission; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; -import org.apache.hadoop.hbase.util.TableName; +import org.apache.hadoop.hbase.util.TestTableName; import org.junit.After; import org.junit.AfterClass; @@ -97,7 +95,6 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import com.google.protobuf.BlockingRpcChannel; -import com.google.protobuf.ByteString; import com.google.protobuf.ServiceException; /** @@ -108,7 +105,7 @@ import com.google.protobuf.ServiceException; @SuppressWarnings("rawtypes") public class TestAccessController { private static final Log LOG = LogFactory.getLog(TestAccessController.class); - @Rule public TableName TEST_TABLE = new TableName(); + @Rule public TestTableName TEST_TABLE = new TestTableName(); private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static Configuration conf; @@ -127,7 +124,8 @@ public class TestAccessController { // user with no permissions private static User USER_NONE; - private static byte[] TEST_TABLE2 = Bytes.toBytes("testtable2"); + private static TableName TEST_TABLE2 = + TableName.valueOf("testtable2"); private static byte[] TEST_FAMILY = Bytes.toBytes("f1"); private static MasterCoprocessorEnvironment CP_ENV; @@ -183,7 +181,7 @@ public class TestAccessController { htd.addFamily(new 
HColumnDescriptor(TEST_FAMILY)); htd.setOwner(USER_OWNER); admin.createTable(htd); - TEST_UTIL.waitTableEnabled(TEST_TABLE.getTableName()); + TEST_UTIL.waitTableEnabled(TEST_TABLE.getTableName().getName()); HRegion region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE.getTableName()).get(0); RegionCoprocessorHost rcpHost = region.getCoprocessorHost(); @@ -193,7 +191,7 @@ public class TestAccessController { // initilize access control HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName()); + BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName().getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); @@ -232,7 +230,7 @@ public class TestAccessController { TEST_UTIL.deleteTable(TEST_TABLE.getTableName()); } catch (TableNotFoundException ex) { // Test deleted the table, no problem - LOG.info("Test deleted table " + Bytes.toString(TEST_TABLE.getTableName())); + LOG.info("Test deleted table " + TEST_TABLE.getTableName()); } assertEquals(0, AccessControlLists.getTablePermissions(conf, TEST_TABLE.getTableName()).size()); } @@ -314,7 +312,7 @@ public class TestAccessController { public void testTableCreate() throws Exception { PrivilegedExceptionAction createTable = new PrivilegedExceptionAction() { public Object run() throws Exception { - HTableDescriptor htd = new HTableDescriptor("testnewtable"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testnewtable")); htd.addFamily(new HColumnDescriptor(TEST_FAMILY)); ACCESS_CONTROLLER.preCreateTable(ObserverContext.createAndPrepare(CP_ENV, null), htd, null); return null; @@ -417,7 +415,7 @@ public class TestAccessController { PrivilegedExceptionAction disableAclTable = new PrivilegedExceptionAction() { public Object run() throws Exception { ACCESS_CONTROLLER.preDisableTable(ObserverContext.createAndPrepare(CP_ENV, null), - AccessControlLists.ACL_TABLE_NAME); + AccessControlLists.ACL_TABLE); return null; } }; @@ -873,7 +871,7 @@ public class TestAccessController { } private void bulkLoadHFile( - byte[] tableName, + TableName tableName, byte[] family, byte[] qualifier, byte[][][] hfileRanges, @@ -894,7 +892,7 @@ public class TestAccessController { HTable table = new HTable(conf, tableName); try { HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); - TEST_UTIL.waitTableEnabled(admin, tableName); + TEST_UTIL.waitTableEnabled(admin, tableName.getName()); LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf); loader.doBulkLoad(loadPath, table); } finally { @@ -948,7 +946,7 @@ public class TestAccessController { public Object run() throws Exception { HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName()); + BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName().getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, USER_RO.getShortName(), TEST_TABLE.getTableName(), @@ -964,7 +962,7 @@ public class TestAccessController { public Object run() throws Exception { HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName()); + BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName().getName()); AccessControlService.BlockingInterface 
protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.revoke(protocol, USER_RO.getShortName(), TEST_TABLE.getTableName(), @@ -980,7 +978,7 @@ public class TestAccessController { public Object run() throws Exception { HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName()); + BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName().getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.getUserPermissions(protocol, TEST_TABLE.getTableName()); @@ -1003,7 +1001,8 @@ public class TestAccessController { @Test public void testPostGrantRevoke() throws Exception { - final byte[] tableName = Bytes.toBytes("TempTable"); + final TableName tableName = + TableName.valueOf("TempTable"); final byte[] family1 = Bytes.toBytes("f1"); final byte[] family2 = Bytes.toBytes("f2"); final byte[] qualifier = Bytes.toBytes("q"); @@ -1159,7 +1158,7 @@ public class TestAccessController { // grant table read permission HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, tblUser.getShortName(), @@ -1183,7 +1182,7 @@ public class TestAccessController { // grant table write permission acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, tblUser.getShortName(), @@ -1207,7 +1206,7 @@ public class TestAccessController { // revoke table permission acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, tblUser.getShortName(), tableName, null, null, @@ -1231,7 +1230,7 @@ public class TestAccessController { // grant column family read permission acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, tblUser.getShortName(), @@ -1257,7 +1256,7 @@ public class TestAccessController { // grant column family write permission acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, tblUser.getShortName(), @@ -1284,7 +1283,7 @@ public class TestAccessController { // revoke column family permission acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = 
acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.revoke(protocol, tblUser.getShortName(), tableName, family2, null); @@ -1317,7 +1316,8 @@ public class TestAccessController { @Test public void testPostGrantRevokeAtQualifierLevel() throws Exception { - final byte[] tableName = Bytes.toBytes("testGrantRevokeAtQualifierLevel"); + final TableName tableName = + TableName.valueOf("testGrantRevokeAtQualifierLevel"); final byte[] family1 = Bytes.toBytes("f1"); final byte[] family2 = Bytes.toBytes("f2"); final byte[] qualifier = Bytes.toBytes("q"); @@ -1379,7 +1379,7 @@ public class TestAccessController { HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.revoke(protocol, user.getShortName(), tableName, family1, null); @@ -1395,7 +1395,7 @@ public class TestAccessController { acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, user.getShortName(), @@ -1414,7 +1414,7 @@ public class TestAccessController { // TODO: comment this portion after HBASE-3583 acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, user.getShortName(), @@ -1432,7 +1432,7 @@ public class TestAccessController { // grant both read and write permission. acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, user.getShortName(), @@ -1451,7 +1451,7 @@ public class TestAccessController { // revoke family level permission won't impact column level. 
acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.revoke(protocol, user.getShortName(), @@ -1473,7 +1473,8 @@ public class TestAccessController { @Test public void testPermissionList() throws Exception { - final byte[] tableName = Bytes.toBytes("testPermissionList2"); + final TableName tableName = + TableName.valueOf("testPermissionList"); final byte[] family1 = Bytes.toBytes("f1"); final byte[] family2 = Bytes.toBytes("f2"); final byte[] qualifier = Bytes.toBytes("q"); @@ -1494,7 +1495,7 @@ public class TestAccessController { HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); perms = ProtobufUtil.getUserPermissions(protocol, tableName); @@ -1518,7 +1519,7 @@ public class TestAccessController { // grant read permission acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, user.getShortName(), @@ -1541,7 +1542,7 @@ public class TestAccessController { // grant read+write acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, user.getShortName(), @@ -1559,7 +1560,7 @@ public class TestAccessController { acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.revoke(protocol, user.getShortName(), tableName, family1, qualifier, @@ -1581,7 +1582,7 @@ public class TestAccessController { acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(tableName); + BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); perms = ProtobufUtil.getUserPermissions(protocol, tableName); @@ -1611,7 +1612,7 @@ public class TestAccessController { acl.close(); } UserPermission adminPerm = new UserPermission(Bytes.toBytes(USER_ADMIN.getShortName()), - AccessControlLists.ACL_TABLE_NAME, null, null, Bytes.toBytes("ACRW")); + AccessControlLists.ACL_TABLE, null, null, Bytes.toBytes("ACRW")); assertTrue("Only user admin has permission on table _acl_ per setup", perms.size() == 1 && hasFoundUserPermission(adminPerm, perms)); } @@ -1648,7 +1649,7 @@ public class TestAccessController { } } - public void checkTablePerms(byte[] table, byte[] family, byte[] column, + public void checkTablePerms(TableName table, byte[] family, byte[] 
column, Permission.Action... actions) throws IOException { Permission[] perms = new Permission[actions.length]; for (int i = 0; i < actions.length; i++) { @@ -1658,7 +1659,7 @@ public class TestAccessController { checkTablePerms(table, perms); } - public void checkTablePerms(byte[] table, Permission... perms) throws IOException { + public void checkTablePerms(TableName table, Permission... perms) throws IOException { CheckPermissionsRequest.Builder request = CheckPermissionsRequest.newBuilder(); for (Permission p : perms) { request.addPermission(ProtobufUtil.toPermission(p)); @@ -1812,7 +1813,7 @@ public class TestAccessController { // check for wrong table region CheckPermissionsRequest checkRequest = CheckPermissionsRequest.newBuilder() .addPermission(AccessControlProtos.Permission.newBuilder() - .setTable(ByteString.copyFrom(TEST_TABLE.getTableName())) + .setTableName(ProtobufUtil.toProtoTableName(TEST_TABLE.getTableName())) .addAction(AccessControlProtos.Permission.Action.CREATE) ).build(); acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); @@ -1927,7 +1928,7 @@ public class TestAccessController { // permissions for the new user. HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName()); + BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName().getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); String currentUser = User.getCurrent().getShortName(); @@ -2006,7 +2007,7 @@ public class TestAccessController { // Grant TABLE ADMIN privs HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName()); + BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName().getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, TABLE_ADMIN.getShortName(), TEST_TABLE.getTableName(), @@ -2053,7 +2054,7 @@ public class TestAccessController { // Grant TABLE ADMIN privs HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME); try { - BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName()); + BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName().getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, TABLE_ADMIN.getShortName(), TEST_TABLE.getTableName(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java index 86d874a..d943bc0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java @@ -38,6 +38,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.LargeTests; @@ -80,8 +81,10 @@ public class TestTablePermissions { } }; - private static byte[] TEST_TABLE = Bytes.toBytes("perms_test"); - private static 
byte[] TEST_TABLE2 = Bytes.toBytes("perms_test2"); + private static TableName TEST_TABLE = + TableName.valueOf("perms_test"); + private static TableName TEST_TABLE2 = + TableName.valueOf("perms_test2"); private static byte[] TEST_FAMILY = Bytes.toBytes("f1"); private static byte[] TEST_QUALIFIER = Bytes.toBytes("col1"); @@ -113,7 +116,7 @@ public class TestTablePermissions { Configuration conf = UTIL.getConfiguration(); AccessControlLists.removeTablePermissions(conf, TEST_TABLE); AccessControlLists.removeTablePermissions(conf, TEST_TABLE2); - AccessControlLists.removeTablePermissions(conf, AccessControlLists.ACL_TABLE_NAME); + AccessControlLists.removeTablePermissions(conf, AccessControlLists.ACL_TABLE); } /** @@ -187,8 +190,8 @@ public class TestTablePermissions { assertNotNull("Should have permissions for george", userPerms); assertEquals("Should have 1 permission for george", 1, userPerms.size()); TablePermission permission = userPerms.get(0); - assertTrue("Permission should be for " + TEST_TABLE, - Bytes.equals(TEST_TABLE, permission.getTable())); + assertEquals("Permission should be for " + TEST_TABLE, + TEST_TABLE, permission.getTable()); assertNull("Column family should be empty", permission.getFamily()); // check actions @@ -202,8 +205,8 @@ public class TestTablePermissions { assertNotNull("Should have permissions for hubert", userPerms); assertEquals("Should have 1 permission for hubert", 1, userPerms.size()); permission = userPerms.get(0); - assertTrue("Permission should be for " + TEST_TABLE, - Bytes.equals(TEST_TABLE, permission.getTable())); + assertEquals("Permission should be for " + TEST_TABLE, + TEST_TABLE, permission.getTable()); assertNull("Column family should be empty", permission.getFamily()); // check actions @@ -217,8 +220,8 @@ public class TestTablePermissions { assertNotNull("Should have permissions for humphrey", userPerms); assertEquals("Should have 1 permission for humphrey", 1, userPerms.size()); permission = userPerms.get(0); - assertTrue("Permission should be for " + TEST_TABLE, - Bytes.equals(TEST_TABLE, permission.getTable())); + assertEquals("Permission should be for " + TEST_TABLE, + TEST_TABLE, permission.getTable()); assertTrue("Permission should be for family " + TEST_FAMILY, Bytes.equals(TEST_FAMILY, permission.getFamily())); assertTrue("Permission should be for qualifier " + TEST_QUALIFIER, @@ -237,7 +240,7 @@ public class TestTablePermissions { TablePermission.Action.READ, TablePermission.Action.WRITE)); // check full load - Map> allPerms = + Map> allPerms = AccessControlLists.loadAll(conf); assertEquals("Full permission map should have entries for both test tables", 2, allPerms.size()); @@ -246,7 +249,7 @@ public class TestTablePermissions { assertNotNull(userPerms); assertEquals(1, userPerms.size()); permission = userPerms.get(0); - assertTrue(Bytes.equals(TEST_TABLE, permission.getTable())); + assertEquals(TEST_TABLE, permission.getTable()); assertEquals(1, permission.getActions().length); assertEquals(TablePermission.Action.READ, permission.getActions()[0]); @@ -254,7 +257,7 @@ public class TestTablePermissions { assertNotNull(userPerms); assertEquals(1, userPerms.size()); permission = userPerms.get(0); - assertTrue(Bytes.equals(TEST_TABLE2, permission.getTable())); + assertEquals(TEST_TABLE2, permission.getTable()); assertEquals(2, permission.getActions().length); actions = Arrays.asList(permission.getActions()); assertTrue(actions.contains(TablePermission.Action.READ)); @@ -290,7 +293,7 @@ public class TestTablePermissions { table.put(new 
Put(Bytes.toBytes("row2")) .add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v2"))); HBaseAdmin admin = UTIL.getHBaseAdmin(); - admin.split(TEST_TABLE); + admin.split(TEST_TABLE.getName()); // wait for split Thread.sleep(10000); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionsWatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionsWatcher.java index 0a17e4e..ca5b066 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionsWatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionsWatcher.java @@ -29,9 +29,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.LargeTests; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -62,7 +62,8 @@ public class TestZKPermissionsWatcher { } }; - private static byte[] TEST_TABLE = Bytes.toBytes("perms_test"); + private static TableName TEST_TABLE = + TableName.valueOf("perms_test"); @BeforeClass public static void beforeClass() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index 4f816f7..c8ada8a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -38,6 +38,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterId; import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MediumTests; @@ -216,7 +217,8 @@ public class TestTokenAuthentication { public Configuration getConfiguration() { return conf; } @Override - public HTableInterface getTable(byte[] tableName) throws IOException { return null; } + public HTableInterface getTable(TableName tableName) throws IOException + { return null; } }); started = true; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java index d232cf5..afba048 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -84,14 +85,15 @@ public class SnapshotTestingUtils { * name and table match the passed in parameters. 
*/ public static List<SnapshotDescription> assertExistsMatchingSnapshot( - HBaseAdmin admin, String snapshotName, String tableName) + HBaseAdmin admin, String snapshotName, TableName tableName) throws IOException { // list the snapshot List<SnapshotDescription> snapshots = admin.listSnapshots(); List<SnapshotDescription> returnedSnapshots = new ArrayList<SnapshotDescription>(); for (SnapshotDescription sd : snapshots) { - if (snapshotName.equals(sd.getName()) && tableName.equals(sd.getTable())) { + if (snapshotName.equals(sd.getName()) && + tableName.equals(TableName.valueOf(sd.getTable()))) { returnedSnapshots.add(sd); } } @@ -105,7 +107,8 @@ */ public static void assertOneSnapshotThatMatches(HBaseAdmin admin, SnapshotDescription snapshot) throws IOException { - assertOneSnapshotThatMatches(admin, snapshot.getName(), snapshot.getTable()); + assertOneSnapshotThatMatches(admin, snapshot.getName(), + TableName.valueOf(snapshot.getTable())); } /** @@ -113,14 +116,14 @@ * name and table match the passed in parameters. */ public static List<SnapshotDescription> assertOneSnapshotThatMatches( - HBaseAdmin admin, String snapshotName, String tableName) + HBaseAdmin admin, String snapshotName, TableName tableName) throws IOException { // list the snapshot List<SnapshotDescription> snapshots = admin.listSnapshots(); assertEquals("Should only have 1 snapshot", 1, snapshots.size()); assertEquals(snapshotName, snapshots.get(0).getName()); - assertEquals(tableName, snapshots.get(0).getTable()); + assertEquals(tableName, TableName.valueOf(snapshots.get(0).getTable())); return snapshots; } @@ -130,16 +133,16 @@ * name and table match the passed in parameters. */ public static List<SnapshotDescription> assertOneSnapshotThatMatches( - HBaseAdmin admin, byte[] snapshot, byte[] tableName) throws IOException { + HBaseAdmin admin, byte[] snapshot, TableName tableName) throws IOException { return assertOneSnapshotThatMatches(admin, Bytes.toString(snapshot), - Bytes.toString(tableName)); + tableName); } /** * Multi-family version of the confirmSnapshotValid function */ public static void confirmSnapshotValid( - SnapshotDescription snapshotDescriptor, byte[] tableName, + SnapshotDescription snapshotDescriptor, TableName tableName, List<byte[]> nonEmptyTestFamilies, List<byte[]> emptyTestFamilies, Path rootDir, HBaseAdmin admin, FileSystem fs, boolean requireLogs, Path logsDir, Set<String> snapshotServers) throws IOException { @@ -163,7 +166,7 @@ * be in the snapshot. */ public static void confirmSnapshotValid( - SnapshotDescription snapshotDescriptor, byte[] tableName, + SnapshotDescription snapshotDescriptor, TableName tableName, byte[] testFamily, Path rootDir, HBaseAdmin admin, FileSystem fs, boolean requireLogs, Path logsDir, Set<String> snapshotServers) throws IOException { @@ -176,7 +179,7 @@ * be in the snapshot.
*/ public static void confirmSnapshotValid( - SnapshotDescription snapshotDescriptor, byte[] tableName, + SnapshotDescription snapshotDescriptor, TableName tableName, byte[] testFamily, Path rootDir, HBaseAdmin admin, FileSystem fs, boolean requireLogs, Path logsDir, boolean familyEmpty, Set<String> snapshotServers) throws IOException { @@ -194,7 +197,7 @@ } // check the table info HTableDescriptor desc = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, - Bytes.toString(tableName)); + tableName); HTableDescriptor snapshotDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir); assertEquals(desc, snapshotDesc); @@ -356,10 +359,10 @@ * in the case of an offline snapshot. */ public static void createOfflineSnapshotAndValidate(HBaseAdmin admin, - String tableNameString, String familyName, String snapshotNameString, + TableName tableName, String familyName, String snapshotNameString, Path rootDir, FileSystem fs, boolean familyEmpty) throws Exception { - createSnapshotAndValidate(admin, tableNameString, familyName, + createSnapshotAndValidate(admin, tableName, familyName, snapshotNameString, rootDir, fs, familyEmpty, false); } @@ -369,29 +372,28 @@ * in the case of an offline snapshot. */ public static void createSnapshotAndValidate(HBaseAdmin admin, - String tableNameString, String familyName, String snapshotNameString, + TableName tableName, String familyName, String snapshotNameString, Path rootDir, FileSystem fs, boolean familyEmpty, boolean onlineSnapshot) throws Exception { - byte[] tableName = Bytes.toBytes(tableNameString); if (!onlineSnapshot) { try { - admin.disableTable(tableNameString); + admin.disableTable(tableName); } catch (TableNotEnabledException tne) { - LOG.info("In attempting to disable " + tableNameString + LOG.info("In attempting to disable " + tableName + " it turns out that this table is already disabled."); } } - admin.snapshot(snapshotNameString, tableNameString); + admin.snapshot(snapshotNameString, tableName); List<SnapshotDescription> snapshots = SnapshotTestingUtils .assertExistsMatchingSnapshot(admin, snapshotNameString, - tableNameString); + tableName); if (snapshots == null || snapshots.size() != 1) { Assert.fail("Incorrect number of snapshots for table " - + String.valueOf(tableNameString)); + + tableName); } SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), tableName, @@ -399,9 +401,9 @@ HConstants.HREGION_LOGDIR_NAME), familyEmpty, null); } public static void createSnapshotAndValidate(HBaseAdmin admin, - String tableNameString, String familyName, String snapshotNameString, + TableName tableName, String familyName, String snapshotNameString, Path rootDir, FileSystem fs) throws Exception { - createSnapshotAndValidate(admin, tableNameString, familyName, + createSnapshotAndValidate(admin, tableName, familyName, snapshotNameString, rootDir, fs, false, false); } @@ -412,30 +414,30 @@ * */ public static void createSnapshotAndValidate(HBaseAdmin admin, -
String tableNameString, List<byte[]> nonEmptyFamilyNames, List<byte[]> emptyFamilyNames, + TableName tableName, List<byte[]> nonEmptyFamilyNames, List<byte[]> emptyFamilyNames, String snapshotNameString, Path rootDir, FileSystem fs) throws Exception { - byte[] tableName = Bytes.toBytes(tableNameString); try { - admin.disableTable(tableNameString); + admin.disableTable(tableName); } catch (TableNotEnabledException tne) { - LOG.info("In attempting to disable " + tableNameString + " it turns out that the this table is already disabled."); + LOG.info("In attempting to disable " + tableName + " it turns out that the this table is " + + "already disabled."); } - admin.snapshot(snapshotNameString, tableNameString); + admin.snapshot(snapshotNameString, tableName); List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertExistsMatchingSnapshot(admin, - snapshotNameString, tableNameString); + snapshotNameString, tableName); // Create test-timestamp-clone if (snapshots == null || snapshots.size() != 1) { - Assert.fail("Incorrect number of snapshots for table " + String.valueOf(tableNameString)); + Assert.fail("Incorrect number of snapshots for table " + tableName); } SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), tableName, nonEmptyFamilyNames, emptyFamilyNames, @@ -445,7 +447,8 @@ // ========================================================================== // Table Helpers // ========================================================================== - public static void waitForTableToBeOnline(final HBaseTestingUtility util, final byte[] tableName) + public static void waitForTableToBeOnline(final HBaseTestingUtility util, + final TableName tableName) throws IOException, InterruptedException { HRegionServer rs = util.getRSForFirstRegionInTable(tableName); List<HRegion> onlineRegions = rs.getOnlineRegions(tableName); @@ -455,7 +458,7 @@ util.getHBaseAdmin().isTableAvailable(tableName); } - public static void createTable(final HBaseTestingUtility util, final byte[] tableName, + public static void createTable(final HBaseTestingUtility util, final TableName tableName, final byte[]... families) throws IOException, InterruptedException { HTableDescriptor htd = new HTableDescriptor(tableName); for (byte[] family: families) { @@ -472,7 +475,7 @@ assertEquals(15, util.getHBaseAdmin().getTableRegions(tableName).size()); } - public static void loadData(final HBaseTestingUtility util, final byte[] tableName, int rows, + public static void loadData(final HBaseTestingUtility util, final TableName tableName, int rows, byte[]...
families) throws IOException, InterruptedException { loadData(util, new HTable(util.getConfiguration(), tableName), rows, families); } @@ -499,7 +502,7 @@ public class SnapshotTestingUtils { } table.flushCommits(); - waitForTableToBeOnline(util, table.getTableName()); + waitForTableToBeOnline(util, table.getName()); } private static void putData(final HTable table, final byte[][] families, @@ -530,7 +533,7 @@ public class SnapshotTestingUtils { mfs.getFileSystem().delete(archiveDir, true); } - public static void verifyRowCount(final HBaseTestingUtility util, final byte[] tableName, + public static void verifyRowCount(final HBaseTestingUtility util, final TableName tableName, long expectedRows) throws IOException { HTable table = new HTable(util.getConfiguration(), tableName); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java index f772abf..2903403 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java @@ -37,15 +37,17 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.util.Bytes; @@ -71,7 +73,7 @@ public class TestExportSnapshot { private byte[] emptySnapshotName; private byte[] snapshotName; - private byte[] tableName; + private TableName tableName; private HBaseAdmin admin; @BeforeClass @@ -97,7 +99,7 @@ public class TestExportSnapshot { this.admin = TEST_UTIL.getHBaseAdmin(); long tid = System.currentTimeMillis(); - tableName = Bytes.toBytes("testtb-" + tid); + tableName = TableName.valueOf("testtb-" + tid); snapshotName = Bytes.toBytes("snaptb0-" + tid); emptySnapshotName = Bytes.toBytes("emptySnaptb0-" + tid); @@ -183,13 +185,15 @@ public class TestExportSnapshot { public void testSnapshotWithRefsExportFileSystemState() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); - final byte[] tableWithRefsName = Bytes.toBytes("tableWithRefs"); + final TableName tableWithRefsName = + TableName.valueOf("tableWithRefs"); final String snapshotName = "tableWithRefs"; final String TEST_FAMILY = Bytes.toString(FAMILY); final String TEST_HFILE = "abc"; final SnapshotDescription sd = SnapshotDescription.newBuilder() - .setName(snapshotName).setTable(Bytes.toString(tableWithRefsName)).build(); + .setName(snapshotName) + .setTable(tableWithRefsName.getNameAsString()).build(); FileSystem fs = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); Path rootDir = 
TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); @@ -198,7 +202,7 @@ public class TestExportSnapshot { // First region, simple with one plain hfile. HRegionInfo hri = new HRegionInfo(tableWithRefsName); HRegionFileSystem r0fs = HRegionFileSystem.createRegionOnFileSystem(conf, - fs, new Path(archiveDir, hri.getTableNameAsString()), hri); + fs, FSUtils.getTableDir(archiveDir, hri.getTableName()), hri); Path storeFile = new Path(rootDir, TEST_HFILE); FSDataOutputStream out = fs.create(storeFile); out.write(Bytes.toBytes("Test Data")); @@ -209,14 +213,14 @@ public class TestExportSnapshot { // This region contains a reference to the hfile in the first region. hri = new HRegionInfo(tableWithRefsName); HRegionFileSystem r1fs = HRegionFileSystem.createRegionOnFileSystem(conf, - fs, new Path(archiveDir, hri.getTableNameAsString()), hri); + fs, new Path(archiveDir, hri.getTableName().getNameAsString()), hri); storeFile = new Path(rootDir, TEST_HFILE + '.' + r0fs.getRegionInfo().getEncodedName()); out = fs.create(storeFile); out.write(Bytes.toBytes("Test Data")); out.close(); r1fs.commitStoreFile(TEST_FAMILY, storeFile); - Path tableDir = HTableDescriptor.getTableDir(archiveDir, tableWithRefsName); + Path tableDir = FSUtils.getTableDir(archiveDir, tableWithRefsName); Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir); FileUtil.copy(fs, tableDir, fs, snapshotDir, false, conf); SnapshotDescriptionUtils.writeSnapshotInfo(sd, snapshotDir, fs); @@ -227,7 +231,7 @@ public class TestExportSnapshot { /** * Test ExportSnapshot */ - private void testExportFileSystemState(final byte[] tableName, final byte[] snapshotName, + private void testExportFileSystemState(final TableName tableName, final byte[] snapshotName, int filesExpected) throws Exception { Path copyDir = TEST_UTIL.getDataTestDir("export-" + System.currentTimeMillis()); URI hdfsUri = FileSystem.get(TEST_UTIL.getConfiguration()).getUri(); @@ -276,7 +280,7 @@ public class TestExportSnapshot { * Verify if the files exists */ private void verifyArchive(final FileSystem fs, final Path rootDir, - final byte[] tableName, final String snapshotName) throws IOException { + final TableName tableName, final String snapshotName) throws IOException { final Path exportedSnapshot = new Path(rootDir, new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName)); final Path exportedArchive = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY); @@ -286,13 +290,14 @@ public class TestExportSnapshot { public void storeFile (final String region, final String family, final String hfile) throws IOException { verifyNonEmptyFile(new Path(exportedArchive, - new Path(Bytes.toString(tableName), new Path(region, new Path(family, hfile))))); + new Path(FSUtils.getTableDir(new Path("./"), tableName), + new Path(region, new Path(family, hfile))))); } public void recoveredEdits (final String region, final String logfile) throws IOException { verifyNonEmptyFile(new Path(exportedSnapshot, - new Path(Bytes.toString(tableName), new Path(region, logfile)))); + new Path(tableName.getNameAsString(), new Path(region, logfile)))); } public void logFile (final String server, final String logfile) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java index 87f22e2..08847eb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java @@ -35,14 +35,14 @@ import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -52,13 +52,11 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; -import org.apache.hadoop.hbase.util.MD5Hash; import org.apache.log4j.Level; import org.junit.After; import org.junit.AfterClass; @@ -83,7 +81,8 @@ public class TestFlushSnapshotFromClient { private static final String STRING_TABLE_NAME = "test"; private static final byte[] TEST_FAM = Bytes.toBytes("fam"); private static final byte[] TEST_QUAL = Bytes.toBytes("q"); - private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME); + private static final TableName TABLE_NAME = + TableName.valueOf(STRING_TABLE_NAME); private final int DEFAULT_NUM_ROWS = 100; /** @@ -163,7 +162,7 @@ public class TestFlushSnapshotFromClient { LOG.debug("FS state before snapshot:"); FSUtils.logFileSystemState(UTIL.getTestFileSystem(), - FSUtils.getRootDir(UTIL.getConfiguration()), LOG); + FSUtils.getRootDir(UTIL.getConfiguration()), LOG); // take a snapshot of the enabled table String snapshotString = "offlineTableSnapshot"; @@ -180,10 +179,10 @@ public class TestFlushSnapshotFromClient { Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); LOG.debug("FS state after snapshot:"); FSUtils.logFileSystemState(UTIL.getTestFileSystem(), - FSUtils.getRootDir(UTIL.getConfiguration()), LOG); + FSUtils.getRootDir(UTIL.getConfiguration()), LOG); SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir, - admin, fs, false, new Path(rootDir, HConstants.HREGION_LOGDIR_NAME), snapshotServers); + admin, fs, false, new Path(rootDir, HConstants.HREGION_LOGDIR_NAME), snapshotServers); } @Test (timeout=300000) @@ -219,7 +218,9 @@ public class TestFlushSnapshotFromClient { public void testAsyncFlushSnapshot() throws Exception { HBaseAdmin admin = UTIL.getHBaseAdmin(); SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("asyncSnapshot") - .setTable(STRING_TABLE_NAME).setType(SnapshotDescription.Type.FLUSH).build(); + .setTable(TABLE_NAME.getNameAsString()) + .setType(SnapshotDescription.Type.FLUSH) + .build(); // 
take the snapshot async admin.takeSnapshotAsync(snapshot); @@ -250,7 +251,7 @@ public class TestFlushSnapshotFromClient { // Clone the table String cloneBeforeMergeName = "cloneBeforeMerge"; admin.cloneSnapshot(snapshotBeforeMergeName, cloneBeforeMergeName); - SnapshotTestingUtils.waitForTableToBeOnline(UTIL, Bytes.toBytes(cloneBeforeMergeName)); + SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TableName.valueOf(cloneBeforeMergeName)); // Merge two regions List regions = admin.getTableRegions(TABLE_NAME); @@ -274,11 +275,11 @@ public class TestFlushSnapshotFromClient { // Clone the table String cloneAfterMergeName = "cloneAfterMerge"; admin.cloneSnapshot(snapshotBeforeMergeName, cloneAfterMergeName); - SnapshotTestingUtils.waitForTableToBeOnline(UTIL, Bytes.toBytes(cloneAfterMergeName)); + SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TableName.valueOf(cloneAfterMergeName)); SnapshotTestingUtils.verifyRowCount(UTIL, TABLE_NAME, numRows); - SnapshotTestingUtils.verifyRowCount(UTIL, Bytes.toBytes(cloneBeforeMergeName), numRows); - SnapshotTestingUtils.verifyRowCount(UTIL, Bytes.toBytes(cloneAfterMergeName), numRows); + SnapshotTestingUtils.verifyRowCount(UTIL, TableName.valueOf(cloneBeforeMergeName), numRows); + SnapshotTestingUtils.verifyRowCount(UTIL, TableName.valueOf(cloneAfterMergeName), numRows); // test that we can delete the snapshot UTIL.deleteTable(cloneAfterMergeName); @@ -319,10 +320,10 @@ public class TestFlushSnapshotFromClient { // Clone the table String cloneName = "cloneMerge"; admin.cloneSnapshot(snapshotName, cloneName); - SnapshotTestingUtils.waitForTableToBeOnline(UTIL, Bytes.toBytes(cloneName)); + SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TableName.valueOf(cloneName)); SnapshotTestingUtils.verifyRowCount(UTIL, TABLE_NAME, numRows); - SnapshotTestingUtils.verifyRowCount(UTIL, Bytes.toBytes(cloneName), numRows); + SnapshotTestingUtils.verifyRowCount(UTIL, TableName.valueOf(cloneName), numRows); // test that we can delete the snapshot UTIL.deleteTable(cloneName); @@ -347,7 +348,7 @@ public class TestFlushSnapshotFromClient { // make sure we only have 1 matching snapshot List snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, - snapshotName, STRING_TABLE_NAME); + snapshotName, TABLE_NAME); // check the directory structure FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); @@ -360,9 +361,9 @@ public class TestFlushSnapshotFromClient { // check the table info HTableDescriptor desc = FSTableDescriptors.getTableDescriptorFromFs(fs, - rootDir,STRING_TABLE_NAME); + rootDir, TABLE_NAME); HTableDescriptor snapshotDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, - SnapshotDescriptionUtils.getSnapshotsDir(rootDir), snapshotName); + new Path(SnapshotDescriptionUtils.getSnapshotsDir(rootDir), snapshotName)); assertEquals(desc, snapshotDesc); // check the region snapshot for all the regions @@ -391,7 +392,8 @@ public class TestFlushSnapshotFromClient { @Test(timeout=300000) public void testConcurrentSnapshottingAttempts() throws IOException, InterruptedException { final String STRING_TABLE2_NAME = STRING_TABLE_NAME + "2"; - final byte[] TABLE2_NAME = Bytes.toBytes(STRING_TABLE2_NAME); + final TableName TABLE2_NAME = + TableName.valueOf(STRING_TABLE2_NAME); int ssNum = 20; HBaseAdmin admin = UTIL.getHBaseAdmin(); @@ -431,7 +433,7 @@ public class TestFlushSnapshotFromClient { SnapshotDescription[] descs = new SnapshotDescription[ssNum]; for (int i = 0; i < ssNum; i++) { SnapshotDescription.Builder builder = 
SnapshotDescription.newBuilder(); - builder.setTable((i % 2) == 0 ? STRING_TABLE_NAME : STRING_TABLE2_NAME); + builder.setTable(((i % 2) == 0 ? TABLE_NAME : TABLE2_NAME).getNameAsString()); builder.setName("ss"+i); builder.setType(SnapshotDescription.Type.FLUSH); descs[i] = builder.build(); @@ -477,9 +479,9 @@ public class TestFlushSnapshotFromClient { int t1SnapshotsCount = 0; int t2SnapshotsCount = 0; for (SnapshotDescription ss : taken) { - if (ss.getTable().equals(STRING_TABLE_NAME)) { + if (TableName.valueOf(ss.getTable()).equals(TABLE_NAME)) { t1SnapshotsCount++; - } else if (ss.getTable().equals(STRING_TABLE2_NAME)) { + } else if (TableName.valueOf(ss.getTable()).equals(TABLE2_NAME)) { t2SnapshotsCount++; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestReferenceRegionHFilesTask.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestReferenceRegionHFilesTask.java index 73c2aba..f11ea8a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestReferenceRegionHFilesTask.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestReferenceRegionHFilesTask.java @@ -29,9 +29,10 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; -import org.apache.hadoop.hbase.snapshot.ReferenceRegionHFilesTask; import org.apache.hadoop.hbase.util.FSUtils; import org.junit.Test; import org.junit.experimental.categories.Category; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java index c354bef..8f77a84 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.LargeTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; @@ -71,7 +72,7 @@ public class TestRestoreFlushSnapshotFromClient { private byte[] snapshotName2; private int snapshot0Rows; private int snapshot1Rows; - private byte[] tableName; + private TableName tableName; private HBaseAdmin admin; @BeforeClass @@ -106,7 +107,7 @@ public class TestRestoreFlushSnapshotFromClient { this.admin = UTIL.getHBaseAdmin(); long tid = System.currentTimeMillis(); - tableName = Bytes.toBytes("testtb-" + tid); + tableName = TableName.valueOf("testtb-" + tid); snapshotName0 = Bytes.toBytes("snaptb0-" + tid); snapshotName1 = Bytes.toBytes("snaptb1-" + tid); snapshotName2 = Bytes.toBytes("snaptb2-" + tid); @@ -121,7 +122,7 @@ public class TestRestoreFlushSnapshotFromClient { logFSTree(); // take a snapshot - admin.snapshot(Bytes.toString(snapshotName0), Bytes.toString(tableName), + admin.snapshot(Bytes.toString(snapshotName0), tableName, SnapshotDescription.Type.FLUSH); 
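The snapshot tests in these hunks keep converting in both directions between TableName and the String table field of the protobuf SnapshotDescription: getNameAsString() when building a descriptor, TableName.valueOf() when inspecting snapshots that were taken. A condensed sketch of that round trip follows; the class and snapshot names are illustrative, only the conversion calls come from the patch.

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

    public class SnapshotNameSketch {
      /** Build the protobuf descriptor from a TableName; the proto only stores a String. */
      static SnapshotDescription describe(String snapshotName, TableName tableName) {
        return SnapshotDescription.newBuilder()
            .setName(snapshotName)
            .setTable(tableName.getNameAsString())
            .setType(SnapshotDescription.Type.FLUSH)
            .build();
      }

      /** Count how many of the taken snapshots belong to the given table. */
      static int countForTable(List<SnapshotDescription> taken, TableName tableName) {
        int count = 0;
        for (SnapshotDescription ss : taken) {
          // Parse the String table field back into a TableName before comparing.
          if (TableName.valueOf(ss.getTable()).equals(tableName)) {
            count++;
          }
        }
        return count;
      }
    }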
LOG.info("=== after snapshot with 500 rows"); @@ -134,7 +135,7 @@ public class TestRestoreFlushSnapshotFromClient { logFSTree(); // take a snapshot of the updated table - admin.snapshot(Bytes.toString(snapshotName1), Bytes.toString(tableName), + admin.snapshot(Bytes.toString(snapshotName1), tableName, SnapshotDescription.Type.FLUSH); LOG.info("=== after snapshot with 1000 rows"); logFSTree(); @@ -183,12 +184,12 @@ public class TestRestoreFlushSnapshotFromClient { @Test public void testCloneSnapshot() throws IOException, InterruptedException { - byte[] clonedTableName = Bytes.toBytes("clonedtb-" + System.currentTimeMillis()); + TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis()); testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows); testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows); } - private void testCloneSnapshot(final byte[] tableName, final byte[] snapshotName, + private void testCloneSnapshot(final TableName tableName, final byte[] snapshotName, int snapshotRows) throws IOException, InterruptedException { // create a new table from snapshot admin.cloneSnapshot(snapshotName, tableName); @@ -199,10 +200,10 @@ public class TestRestoreFlushSnapshotFromClient { @Test public void testRestoreSnapshotOfCloned() throws IOException, InterruptedException { - byte[] clonedTableName = Bytes.toBytes("clonedtb-" + System.currentTimeMillis()); + TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis()); admin.cloneSnapshot(snapshotName0, clonedTableName); SnapshotTestingUtils.verifyRowCount(UTIL, clonedTableName, snapshot0Rows); - admin.snapshot(Bytes.toString(snapshotName2), Bytes.toString(clonedTableName), SnapshotDescription.Type.FLUSH); + admin.snapshot(Bytes.toString(snapshotName2), clonedTableName, SnapshotDescription.Type.FLUSH); UTIL.deleteTable(clonedTableName); admin.cloneSnapshot(snapshotName2, clonedTableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java index 8c2f384..1c862a0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java @@ -38,10 +38,12 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.monitoring.MonitoredTask; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; @@ -92,23 +94,23 @@ public class TestRestoreSnapshotHelper { // Test clone a snapshot HTableDescriptor htdClone = createTableDescriptor("testtb-clone"); - testRestore(snapshotDir, htd.getNameAsString(), htdClone); + testRestore(snapshotDir, htd.getTableName().getNameAsString(), htdClone); verifyRestore(rootDir, htd, htdClone); // Test clone a clone ("link to link") - Path cloneDir = HTableDescriptor.getTableDir(rootDir, htdClone.getName()); + Path cloneDir = 
FSUtils.getTableDir(rootDir, htdClone.getTableName()); HTableDescriptor htdClone2 = createTableDescriptor("testtb-clone2"); - testRestore(cloneDir, htdClone.getNameAsString(), htdClone2); + testRestore(cloneDir, htdClone.getTableName().getNameAsString(), htdClone2); verifyRestore(rootDir, htd, htdClone2); } private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd, final HTableDescriptor htdClone) throws IOException { - String[] files = getHFiles(HTableDescriptor.getTableDir(rootDir, htdClone.getName())); + String[] files = getHFiles(FSUtils.getTableDir(rootDir, htdClone.getTableName())); assertEquals(2, files.length); assertTrue(files[0] + " should be a HFileLink", HFileLink.isHFileLink(files[0])); assertTrue(files[1] + " should be a Referene", StoreFileInfo.isReference(files[1])); - assertEquals(sourceHtd.getNameAsString(), HFileLink.getReferencedTableName(files[0])); + assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(files[0])); assertEquals(TEST_HFILE, HFileLink.getReferencedHFileName(files[0])); Path refPath = getReferredToFile(files[1]); assertTrue(refPath.getName() + " should be a HFileLink", HFileLink.isHFileLink(refPath.getName())); @@ -123,14 +125,14 @@ public class TestRestoreSnapshotHelper { */ public void testRestore(final Path snapshotDir, final String sourceTableName, final HTableDescriptor htdClone) throws IOException { - LOG.debug("pre-restore table=" + htdClone.getNameAsString() + " snapshot=" + snapshotDir); + LOG.debug("pre-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir); FSUtils.logFileSystemState(fs, rootDir, LOG); new FSTableDescriptors(conf).createTableDescriptor(htdClone); RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sourceTableName, htdClone); helper.restoreHdfsRegions(); - LOG.debug("post-restore table=" + htdClone.getNameAsString() + " snapshot=" + snapshotDir); + LOG.debug("post-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir); FSUtils.logFileSystemState(fs, rootDir, LOG); } @@ -145,37 +147,39 @@ public class TestRestoreSnapshotHelper { MonitoredTask status = Mockito.mock(MonitoredTask.class); SnapshotDescription sd = SnapshotDescription.newBuilder() - .setName("snapshot").setTable(sourceTableName).build(); + .setName("snapshot") + .setTable(sourceTableName) + .build(); return new RestoreSnapshotHelper(conf, fs, sd, snapshotDir, - htdClone, HTableDescriptor.getTableDir(rootDir, htdClone.getName()), monitor, status); + htdClone, rootDir, monitor, status); } private void createSnapshot(final Path rootDir, final Path snapshotDir, final HTableDescriptor htd) throws IOException { // First region, simple with one plain hfile. - HRegionInfo hri = new HRegionInfo(htd.getName()); + HRegionInfo hri = new HRegionInfo(htd.getTableName()); HRegionFileSystem r0fs = HRegionFileSystem.createRegionOnFileSystem(conf, - fs, new Path(archiveDir, hri.getTableNameAsString()), hri); + fs, FSUtils.getTableDir(archiveDir, hri.getTableName()), hri); Path storeFile = new Path(rootDir, TEST_HFILE); fs.createNewFile(storeFile); r0fs.commitStoreFile(TEST_FAMILY, storeFile); // Second region, used to test the split case. // This region contains a reference to the hfile in the first region. 
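The filesystem change these hunks repeat is that a table directory is now derived from a TableName via FSUtils.getTableDir(rootDir, tableName) instead of appending the raw name to a parent path (the old HTableDescriptor.getTableDir / new Path(dir, nameString) style), and HRegionInfo is built from a TableName as well. A minimal sketch under those assumptions; the helper class below is illustrative, not part of the patch.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class TableDirSketch {
      /** Create a region directory under the table dir derived from a TableName. */
      static HRegionFileSystem createRegion(Configuration conf, FileSystem fs, Path rootDir,
          TableName tableName) throws IOException {
        // New style: ask FSUtils for the table dir instead of appending the raw name to rootDir.
        Path tableDir = FSUtils.getTableDir(rootDir, tableName);
        // HRegionInfo is likewise constructed from a TableName now.
        HRegionInfo hri = new HRegionInfo(tableName);
        return HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
      }
    }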
- hri = new HRegionInfo(htd.getName()); + hri = new HRegionInfo(htd.getTableName()); HRegionFileSystem r1fs = HRegionFileSystem.createRegionOnFileSystem(conf, - fs, new Path(archiveDir, hri.getTableNameAsString()), hri); + fs, FSUtils.getTableDir(archiveDir, hri.getTableName()), hri); storeFile = new Path(rootDir, TEST_HFILE + '.' + r0fs.getRegionInfo().getEncodedName()); fs.createNewFile(storeFile); r1fs.commitStoreFile(TEST_FAMILY, storeFile); - Path tableDir = HTableDescriptor.getTableDir(archiveDir, htd.getName()); + Path tableDir = FSUtils.getTableDir(archiveDir, htd.getTableName()); FileUtil.copy(fs, tableDir, fs, snapshotDir, false, conf); } private HTableDescriptor createTableDescriptor(final String tableName) { - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); htd.addFamily(new HColumnDescriptor(TEST_FAMILY)); return htd; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotLogSplitter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotLogSplitter.java index b99c8ff..99715f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotLogSplitter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotLogSplitter.java @@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; @@ -41,7 +42,6 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; -import org.apache.hadoop.hbase.snapshot.SnapshotLogSplitter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.junit.*; @@ -84,7 +84,7 @@ public class TestSnapshotLogSplitter { @Test public void testSplitLogsOnDifferentTable() throws IOException { - byte[] tableName = getTableName(1); + TableName tableName = getTableName(1); Map regionsMap = new TreeMap(Bytes.BYTES_COMPARATOR); for (int j = 0; j < 10; ++j) { byte[] regionName = getRegionName(tableName, j); @@ -97,9 +97,9 @@ public class TestSnapshotLogSplitter { /* * Split and verify test logs for the specified table */ - private void splitTestLogs(final byte[] tableName, final Map regionsMap) - throws IOException { - Path tableDir = new Path(TEST_UTIL.getDataTestDir(), Bytes.toString(tableName)); + private void splitTestLogs(final TableName tableName, + final Map regionsMap) throws IOException { + Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDataTestDir(), tableName); SnapshotLogSplitter logSplitter = new SnapshotLogSplitter(conf, fs, tableDir, tableName, regionsMap); try { @@ -113,10 +113,10 @@ public class TestSnapshotLogSplitter { /* * Verify that every logs in the table directory has just the specified table and regions. 
*/ - private void verifyRecoverEdits(final Path tableDir, final byte[] tableName, + private void verifyRecoverEdits(final Path tableDir, final TableName tableName, final Map regionsMap) throws IOException { for (FileStatus regionStatus: FSUtils.listStatus(fs, tableDir)) { - assertTrue(regionStatus.getPath().getName().startsWith(Bytes.toString(tableName))); + assertTrue(regionStatus.getPath().getName().startsWith(tableName.getNameAsString())); Path regionEdits = HLogUtil.getRegionDirRecoveredEditsDir(regionStatus.getPath()); byte[] regionName = Bytes.toBytes(regionStatus.getPath().getName()); assertFalse(regionsMap.containsKey(regionName)); @@ -126,7 +126,7 @@ public class TestSnapshotLogSplitter { HLog.Entry entry; while ((entry = reader.next()) != null) { HLogKey key = entry.getKey(); - assertArrayEquals(tableName, key.getTablename()); + assertEquals(tableName, key.getTablename()); assertArrayEquals(regionName, key.getEncodedRegionName()); } } finally { @@ -147,7 +147,7 @@ public class TestSnapshotLogSplitter { HLog.Writer writer = HLogFactory.createWriter(fs, logFile, conf); try { for (int i = 0; i < 7; ++i) { - byte[] tableName = getTableName(i); + TableName tableName = getTableName(i); for (int j = 0; j < 10; ++j) { byte[] regionName = getRegionName(tableName, j); for (int k = 0; k < 50; ++k) { @@ -165,15 +165,15 @@ public class TestSnapshotLogSplitter { } } - private byte[] getTableName(int tableId) { - return Bytes.toBytes("testtb-" + tableId); + private TableName getTableName(int tableId) { + return TableName.valueOf("testtb-" + tableId); } - private byte[] getRegionName(final byte[] tableName, int regionId) { - return Bytes.toBytes(Bytes.toString(tableName) + "-region-" + regionId); + private byte[] getRegionName(final TableName tableName, int regionId) { + return Bytes.toBytes(tableName + "-region-" + regionId); } - private byte[] getNewRegionName(final byte[] tableName, int regionId) { - return Bytes.toBytes(Bytes.toString(tableName) + "-new-region-" + regionId); + private byte[] getNewRegionName(final TableName tableName, int regionId) { + return Bytes.toBytes(tableName + "-new-region-" + regionId); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotTask.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotTask.java index 36b7050..760ac14 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotTask.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotTask.java @@ -18,15 +18,15 @@ package org.apache.hadoop.hbase.snapshot; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; -import org.apache.hadoop.hbase.snapshot.SnapshotTask; import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java index 0f9aee9..96d7177 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java @@ -32,6 +32,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompatibilityFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -51,11 +52,6 @@ import org.apache.hadoop.hbase.thrift.generated.TRegionInfo; import org.apache.hadoop.hbase.thrift.generated.TRowResult; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.metrics.ContextFactory; -import org.apache.hadoop.metrics.MetricsContext; -import org.apache.hadoop.metrics.MetricsUtil; -import org.apache.hadoop.metrics.spi.NoEmitMetricsContext; -import org.apache.hadoop.metrics.spi.OutputRecord; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -580,7 +576,8 @@ public class TestThriftServer { try { handler.mutateRow(tableAname, rowAname, getMutations(), null); byte[] searchRow = HRegionInfo.createRegionName( - tableAname.array(), rowAname.array(), HConstants.NINES, false); + TableName.valueOf(tableAname.array()), rowAname.array(), + HConstants.NINES, false); TRegionInfo regionInfo = handler.getRegionInfo(ByteBuffer.wrap(searchRow)); assertTrue(Bytes.toStringBinary(regionInfo.getName()).startsWith( Bytes.toStringBinary(tableAname))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java index 36be084..e56a22e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Put; @@ -130,7 +131,7 @@ public class TestThriftHBaseServiceHandler { public static void beforeClass() throws Exception { UTIL.startMiniCluster(); HBaseAdmin admin = new HBaseAdmin(UTIL.getConfiguration()); - HTableDescriptor tableDescriptor = new HTableDescriptor(tableAname); + HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(tableAname)); for (HColumnDescriptor family : families) { tableDescriptor.addFamily(family); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java index bbcd7e9..386fea6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java @@ -32,11 +32,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HRegionInfo; import 
org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.Store; -import org.apache.hadoop.hbase.regionserver.HStore; /** * Test helper for testing archiving of HFiles @@ -212,9 +210,11 @@ public class HFileArchiveTestingUtil { * @param region region that is being archived * @return {@link Path} to the archive directory for the given region */ - public static Path getRegionArchiveDir(Configuration conf, HRegion region) { - return HFileArchiveUtil.getRegionArchiveDir(region.getRegionFileSystem().getTableDir(), - region.getRegionFileSystem().getRegionDir()); + public static Path getRegionArchiveDir(Configuration conf, HRegion region) throws IOException { + return HFileArchiveUtil.getRegionArchiveDir( + FSUtils.getRootDir(conf), + region.getTableDesc().getTableName(), + region.getRegionInfo().getEncodedName()); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java index eb7fb46..7a21c63 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java @@ -27,6 +27,7 @@ import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.ParseException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -50,7 +51,7 @@ public class LoadTestTool extends AbstractHBaseTool { private static final Log LOG = LogFactory.getLog(LoadTestTool.class); /** Table name for the test */ - protected byte[] tableName; + private TableName tableName; /** Table name to use of not overridden on the command line */ protected static final String DEFAULT_TABLE_NAME = "cluster_test"; @@ -169,11 +170,11 @@ public class LoadTestTool extends AbstractHBaseTool { * Apply column family options such as Bloom filters, compression, and data * block encoding. 
*/ - protected void applyColumnFamilyOptions(byte[] tableName, + protected void applyColumnFamilyOptions(TableName tableName, byte[][] columnFamilies) throws IOException { HBaseAdmin admin = new HBaseAdmin(conf); HTableDescriptor tableDesc = admin.getTableDescriptor(tableName); - LOG.info("Disabling table " + Bytes.toString(tableName)); + LOG.info("Disabling table " + tableName); admin.disableTable(tableName); for (byte[] cf : columnFamilies) { HColumnDescriptor columnDesc = tableDesc.getFamily(cf); @@ -200,7 +201,7 @@ public class LoadTestTool extends AbstractHBaseTool { admin.modifyColumn(tableName, columnDesc); } } - LOG.info("Enabling table " + Bytes.toString(tableName)); + LOG.info("Enabling table " + tableName); admin.enableTable(tableName); } @@ -244,7 +245,7 @@ public class LoadTestTool extends AbstractHBaseTool { protected void processOptions(CommandLine cmd) { this.cmd = cmd; - tableName = Bytes.toBytes(cmd.getOptionValue(OPT_TABLE_NAME, + tableName = TableName.valueOf(cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME)); isWrite = cmd.hasOption(OPT_WRITE); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java index 8975cb0..dcc534c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockRegionServerServices.java @@ -26,6 +26,7 @@ import java.util.concurrent.ConcurrentSkipListMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.CatalogTracker; @@ -79,7 +80,7 @@ public class MockRegionServerServices implements RegionServerServices { return this.regions.get(encodedRegionName); } - public List getOnlineRegions(byte[] tableName) throws IOException { + public List getOnlineRegions(TableName tableName) throws IOException { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java index 72f7a90..7f04d22 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; import java.util.Collection; import java.util.Map; -import java.util.NavigableMap; import java.util.Random; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; @@ -28,6 +27,7 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator; import org.apache.hadoop.hbase.util.test.LoadTestKVGenerator; @@ -40,7 +40,7 @@ import org.apache.hadoop.util.StringUtils; public abstract class MultiThreadedAction { private static final Log LOG = LogFactory.getLog(MultiThreadedAction.class); - protected final byte[] tableName; + protected final TableName tableName; protected final Configuration conf; protected int numThreads = 1; @@ -129,8 +129,9 @@ public abstract class 
MultiThreadedAction { public static final int REPORTING_INTERVAL_MS = 5000; - public MultiThreadedAction(LoadTestDataGenerator dataGen, Configuration conf, byte[] tableName, - String actionLetter) { + public MultiThreadedAction(LoadTestDataGenerator dataGen, Configuration conf, + TableName tableName, + String actionLetter) { this.conf = conf; this.dataGenerator = dataGen; this.tableName = tableName; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java index eb0c268..d27bc62 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; import java.util.HashSet; -import java.util.List; -import java.util.Map; import java.util.Random; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; @@ -27,13 +25,12 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator; -import org.apache.hadoop.hbase.util.test.LoadTestKVGenerator; /** Creates multiple threads that read and verify previously written data */ public class MultiThreadedReader extends MultiThreadedAction @@ -75,7 +72,7 @@ public class MultiThreadedReader extends MultiThreadedAction private int keyWindow = DEFAULT_KEY_WINDOW; public MultiThreadedReader(LoadTestDataGenerator dataGen, Configuration conf, - byte[] tableName, double verifyPercent) { + TableName tableName, double verifyPercent) { super(dataGen, conf, tableName, "R"); this.verifyPercent = verifyPercent; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java index 41ba126..24b63bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.io.PrintWriter; import java.io.StringWriter; import java.util.HashSet; -import java.util.Map; import java.util.PriorityQueue; import java.util.Queue; import java.util.Set; @@ -35,13 +34,12 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; -import org.apache.hadoop.hbase.util.test.LoadTestKVGenerator; -import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator; /** Creates multiple threads that write key/values into the */ @@ -85,7 +83,7 @@ public class MultiThreadedWriter extends 
MultiThreadedAction { private boolean trackInsertedKeys; public MultiThreadedWriter(LoadTestDataGenerator dataGen, Configuration conf, - byte[] tableName) { + TableName tableName) { super(dataGen, conf, tableName, "W"); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java index bcc282c..ad0b936 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -171,7 +172,7 @@ public class ProcessBasedLocalHBaseCluster { int attemptsLeft = 10; while (attemptsLeft-- > 0) { try { - new HTable(conf, HConstants.META_TABLE_NAME); + new HTable(conf, TableName.META_TABLE_NAME); } catch (Exception e) { LOG.info("Waiting for HBase to startup. Retries left: " + attemptsLeft, e); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java index 421db05..0aafadb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java @@ -21,6 +21,7 @@ import java.io.IOException; import org.apache.commons.cli.CommandLine; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.HTable; @@ -44,7 +45,8 @@ public class RestartMetaTest extends AbstractHBaseTool { private static final int DEFAULT_NUM_RS = 2; /** Table name for the test */ - private static byte[] TABLE_NAME = Bytes.toBytes("load_test"); + private static TableName TABLE_NAME = + TableName.valueOf("load_test"); /** The number of seconds to sleep after loading the data */ private static final int SLEEP_SEC_AFTER_DATA_LOAD = 5; @@ -118,7 +120,7 @@ public class RestartMetaTest extends AbstractHBaseTool { LOG.debug("Trying to scan meta"); - HTable metaTable = new HTable(conf, HConstants.META_TABLE_NAME); + HTable metaTable = new HTable(conf, TableName.META_TABLE_NAME); ResultScanner scanner = metaTable.getScanner(new Scan()); Result result; while ((result = scanner.next()) != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TableName.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TableName.java deleted file mode 100644 index e2c5ece..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TableName.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.util; - -import org.junit.rules.TestWatcher; -import org.junit.runner.Description; - -/** - * Returns a {@code byte[]} containing the name of the currently running test method. - */ -public class TableName extends TestWatcher { - private byte[] tableName; - - /** - * Invoked when a test is about to start - */ - @Override - protected void starting(Description description) { - tableName = Bytes.toBytes(description.getMethodName()); - } - - public byte[] getTableName() { - return tableName; - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java index ef9182c..94e009e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java @@ -33,6 +33,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -57,7 +58,6 @@ import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.hadoop.hbase.regionserver.StoreScanner; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; -import org.apache.hadoop.hbase.util.Bytes; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -101,7 +101,8 @@ public class TestCoprocessorScanPolicy { @Test public void testBaseCases() throws Exception { - byte[] tableName = Bytes.toBytes("baseCases"); + TableName tableName = + TableName.valueOf("baseCases"); if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) { TEST_UTIL.deleteTable(tableName); } @@ -109,7 +110,7 @@ public class TestCoprocessorScanPolicy { // set the version override to 2 Put p = new Put(R); p.setAttribute("versions", new byte[]{}); - p.add(F, tableName, Bytes.toBytes(2)); + p.add(F, tableName.getName(), Bytes.toBytes(2)); t.put(p); long now = EnvironmentEdgeManager.currentTimeMillis(); @@ -150,7 +151,8 @@ public class TestCoprocessorScanPolicy { @Test public void testTTL() throws Exception { - byte[] tableName = Bytes.toBytes("testTTL"); + TableName tableName = + TableName.valueOf("testTTL"); if (TEST_UTIL.getHBaseAdmin().tableExists(tableName)) { TEST_UTIL.deleteTable(tableName); } @@ -170,7 +172,7 @@ public class TestCoprocessorScanPolicy { // Set the TTL override to 3s Put p = new Put(R); p.setAttribute("ttl", new byte[]{}); - p.add(F, tableName, Bytes.toBytes(3000L)); + p.add(F, tableName.getName(), Bytes.toBytes(3000L)); t.put(p); p = new Put(R); @@ -209,8 +211,10 @@ public class TestCoprocessorScanPolicy { } public static class ScanObserver extends BaseRegionObserver { - private Map ttls = new HashMap(); - private Map versions = new HashMap(); + private Map ttls = + new HashMap(); + private 
Map versions = + new HashMap(); // lame way to communicate with the coprocessor, // since it is loaded by a different class loader @@ -220,12 +224,12 @@ public class TestCoprocessorScanPolicy { if (put.getAttribute("ttl") != null) { Cell cell = put.getFamilyMap().values().iterator().next().get(0); KeyValue kv = KeyValueUtil.ensureKeyValue(cell); - ttls.put(Bytes.toString(kv.getQualifier()), Bytes.toLong(kv.getValue())); + ttls.put(TableName.valueOf(kv.getQualifier()), Bytes.toLong(kv.getValue())); c.bypass(); } else if (put.getAttribute("versions") != null) { Cell cell = put.getFamilyMap().values().iterator().next().get(0); KeyValue kv = KeyValueUtil.ensureKeyValue(cell); - versions.put(Bytes.toString(kv.getQualifier()), Bytes.toInt(kv.getValue())); + versions.put(TableName.valueOf(kv.getQualifier()), Bytes.toInt(kv.getValue())); c.bypass(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index 3d4caf5..6b38680 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -34,6 +34,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -67,7 +68,7 @@ public class TestFSTableDescriptors { @Test public void testCreateAndUpdate() throws IOException { Path testdir = UTIL.getDataTestDir("testCreateAndUpdate"); - HTableDescriptor htd = new HTableDescriptor("testCreate"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCreate")); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir); assertTrue(fstd.createTableDescriptor(htd)); @@ -79,7 +80,7 @@ public class TestFSTableDescriptors { } statuses = fs.listStatus(testdir); assertTrue(statuses.length == 1); - Path tmpTableDir = new Path(FSUtils.getTablePath(testdir, htd.getName()), ".tmp"); + Path tmpTableDir = new Path(FSUtils.getTableDir(testdir, htd.getTableName()), ".tmp"); statuses = fs.listStatus(tmpTableDir); assertTrue(statuses.length == 0); } @@ -87,7 +88,8 @@ public class TestFSTableDescriptors { @Test public void testSequenceIdAdvancesOnTableInfo() throws IOException { Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo"); - HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo"); + HTableDescriptor htd = new HTableDescriptor( + TableName.valueOf("testSequenceidAdvancesOnTableInfo")); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir); Path p0 = fstd.updateTableDescriptor(htd); @@ -143,21 +145,21 @@ public class TestFSTableDescriptors { // Cleanup old tests if any detrius laying around. 
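ScanObserver above now keys its ttls/versions maps by TableName parsed from the qualifier bytes rather than by String. A tiny sketch of that lookup pattern; it assumes (as the observer's maps imply) that TableName equality and hashCode are based on the underlying name, so a key built from bytes and one built from the String form match.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableNameKeySketch {
      public static void main(String[] args) {
        Map<TableName, Long> ttls = new HashMap<TableName, Long>();
        // Key built from raw bytes, as ScanObserver does with kv.getQualifier().
        ttls.put(TableName.valueOf(Bytes.toBytes("testTTL")), 3000L);
        // A key built later from the String form should locate the same entry,
        // assuming TableName equality is defined by the underlying name.
        System.out.println(ttls.get(TableName.valueOf("testTTL"))); // expected: 3000
      }
    }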
Path rootdir = new Path(UTIL.getDataTestDir(), name); TableDescriptors htds = new FSTableDescriptors(fs, rootdir); - HTableDescriptor htd = new HTableDescriptor(name); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); htds.add(htd); - assertNotNull(htds.remove(htd.getNameAsString())); - assertNull(htds.remove(htd.getNameAsString())); + assertNotNull(htds.remove(htd.getTableName())); + assertNull(htds.remove(htd.getTableName())); } @Test public void testReadingHTDFromFS() throws IOException { final String name = "testReadingHTDFromFS"; FileSystem fs = FileSystem.get(UTIL.getConfiguration()); - HTableDescriptor htd = new HTableDescriptor(name); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); Path rootdir = UTIL.getDataTestDir(name); FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir); fstd.createTableDescriptor(htd); HTableDescriptor htd2 = - FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getNameAsString()); + FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName()); assertTrue(htd.equals(htd2)); } @@ -169,9 +171,9 @@ public class TestFSTableDescriptors { Path rootdir = new Path(UTIL.getDataTestDir(), name); FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir) { @Override - public HTableDescriptor get(byte[] tablename) + public HTableDescriptor get(TableName tablename) throws TableExistsException, FileNotFoundException, IOException { - LOG.info(Bytes.toString(tablename) + ", cachehits=" + this.cachehits); + LOG.info(tablename + ", cachehits=" + this.cachehits); return super.get(tablename); } }; @@ -183,29 +185,29 @@ public class TestFSTableDescriptors { } for (int i = 0; i < count; i++) { - assertTrue(htds.get(Bytes.toBytes(name + i)) != null); + assertTrue(htds.get(TableName.valueOf(name + i)) != null); } for (int i = 0; i < count; i++) { - assertTrue(htds.get(Bytes.toBytes(name + i)) != null); + assertTrue(htds.get(TableName.valueOf(name + i)) != null); } // Update the table infos for (int i = 0; i < count; i++) { - HTableDescriptor htd = new HTableDescriptor(name + i); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i)); htd.addFamily(new HColumnDescriptor("" + i)); htds.updateTableDescriptor(htd); } // Wait a while so mod time we write is for sure different. Thread.sleep(100); for (int i = 0; i < count; i++) { - assertTrue(htds.get(Bytes.toBytes(name + i)) != null); + assertTrue(htds.get(TableName.valueOf(name + i)) != null); } for (int i = 0; i < count; i++) { - assertTrue(htds.get(Bytes.toBytes(name + i)) != null); + assertTrue(htds.get(TableName.valueOf(name + i)) != null); } assertEquals(count * 4, htds.invocations); assertTrue("expected=" + (count * 2) + ", actual=" + htds.cachehits, htds.cachehits >= (count * 2)); - assertTrue(htds.get(HConstants.ROOT_TABLE_NAME) != null); + assertTrue(htds.get(TableName.ROOT_TABLE_NAME) != null); assertEquals(htds.invocations, count * 4 + 1); assertTrue("expected=" + ((count * 2) + 1) + ", actual=" + htds.cachehits, htds.cachehits >= ((count * 2) + 1)); @@ -219,7 +221,7 @@ public class TestFSTableDescriptors { Path rootdir = new Path(UTIL.getDataTestDir(), name); TableDescriptors htds = new FSTableDescriptors(fs, rootdir); assertNull("There shouldn't be any HTD for this table", - htds.get("NoSuchTable")); + htds.get(TableName.valueOf("NoSuchTable"))); } @Test @@ -229,7 +231,7 @@ public class TestFSTableDescriptors { // Cleanup old tests if any detrius laying around. 
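TestFSTableDescriptors now addresses descriptors purely by TableName: the descriptor is constructed from one, and createTableDescriptor, get/remove and the static getTableDescriptorFromFs helper are all keyed by it. A brief sketch of that flow, assuming the same FileSystem and test root directory the test already sets up; the class name is illustrative.

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public class FsTableDescriptorsSketch {
      static HTableDescriptor writeAndReadBack(FileSystem fs, Path rootdir) throws IOException {
        TableName name = TableName.valueOf("testReadingHTDFromFS");
        HTableDescriptor htd = new HTableDescriptor(name);  // built from a TableName now

        FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
        fstd.createTableDescriptor(htd);

        // All lookups are keyed by TableName, including the static filesystem helper.
        return FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, name);
      }
    }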
Path rootdir = new Path(UTIL.getDataTestDir(), name); TableDescriptors htds = new FSTableDescriptors(fs, rootdir); - HTableDescriptor htd = new HTableDescriptor(name); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); htds.add(htd); htds.add(htd); htds.add(htd); @@ -266,9 +268,9 @@ public class TestFSTableDescriptors { FileSystem fs = FileSystem.get(UTIL.getConfiguration()); try { new FSTableDescriptors(fs, FSUtils.getRootDir(UTIL.getConfiguration())) - .get(HConstants.HFILE_ARCHIVE_DIRECTORY); + .get(TableName.valueOf(HConstants.HFILE_ARCHIVE_DIRECTORY)); fail("Shouldn't be able to read a table descriptor for the archive directory."); - } catch (IOException e) { + } catch (Exception e) { LOG.debug("Correctly got error when reading a table descriptor from the archive directory: " + e.getMessage()); } @@ -277,15 +279,15 @@ public class TestFSTableDescriptors { @Test public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException { Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready"); - HTableDescriptor htd = new HTableDescriptor( - "testCreateTableDescriptorUpdatesIfThereExistsAlready"); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf( + "testCreateTableDescriptorUpdatesIfThereExistsAlready")); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir); assertTrue(fstd.createTableDescriptor(htd)); assertFalse(fstd.createTableDescriptor(htd)); htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue")); assertTrue(fstd.createTableDescriptor(htd)); //this will re-create - Path tableDir = fstd.getTableDirectory(htd.getNameAsString()); + Path tableDir = fstd.getTableDir(htd.getTableName()); Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR); FileStatus[] statuses = fs.listStatus(tmpTableDir); assertTrue(statuses.length == 0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java index 6b52318..7c20665 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -49,6 +50,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -66,6 +68,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -152,32 +155,37 @@ public class TestHBaseFsck { // Now let's mess it up and change the assignment in .META. 
to // point to a different region server - HTable meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getName(), executorService); - ResultScanner scanner = meta.getScanner(new Scan()); + HTable meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName(), + executorService); + Scan scan = new Scan(); + scan.setStartRow(Bytes.toBytes(table+",,")); + ResultScanner scanner = meta.getScanner(scan); HRegionInfo hri = null; - resforloop: - for (Result res : scanner) { - long startCode = Bytes.toLong(res.getValue(HConstants.CATALOG_FAMILY, - HConstants.STARTCODE_QUALIFIER)); - - for (JVMClusterUtil.RegionServerThread rs : - TEST_UTIL.getHBaseCluster().getRegionServerThreads()) { - - ServerName sn = rs.getRegionServer().getServerName(); - - // When we find a diff RS, change the assignment and break - if (startCode != sn.getStartcode()) { - Put put = new Put(res.getRow()); - put.setDurability(Durability.SKIP_WAL); - put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(sn.getHostAndPort())); - put.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, - Bytes.toBytes(sn.getStartcode())); - meta.put(put); - hri = HRegionInfo.getHRegionInfo(res); - break resforloop; - } + Result res = scanner.next(); + ServerName currServer = + ServerName.parseFrom(res.getValue(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER)); + long startCode = Bytes.toLong(res.getValue(HConstants.CATALOG_FAMILY, + HConstants.STARTCODE_QUALIFIER)); + + for (JVMClusterUtil.RegionServerThread rs : + TEST_UTIL.getHBaseCluster().getRegionServerThreads()) { + + ServerName sn = rs.getRegionServer().getServerName(); + + // When we find a diff RS, change the assignment and break + if (!currServer.getHostAndPort().equals(sn.getHostAndPort()) || + startCode != sn.getStartcode()) { + Put put = new Put(res.getRow()); + put.setDurability(Durability.SKIP_WAL); + put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, + Bytes.toBytes(sn.getHostAndPort())); + put.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, + Bytes.toBytes(sn.getStartcode())); + meta.put(put); + hri = HRegionInfo.getHRegionInfo(res); + break; } } @@ -207,8 +215,8 @@ public class TestHBaseFsck { private HRegionInfo createRegion(Configuration conf, final HTableDescriptor htd, byte[] startKey, byte[] endKey) throws IOException { - HTable meta = new HTable(conf, HConstants.META_TABLE_NAME, executorService); - HRegionInfo hri = new HRegionInfo(htd.getName(), startKey, endKey); + HTable meta = new HTable(conf, TableName.META_TABLE_NAME, executorService); + HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKey, endKey); MetaEditor.addRegionToMeta(meta, hri); meta.close(); return hri; @@ -217,7 +225,7 @@ public class TestHBaseFsck { /** * Debugging method to dump the contents of meta. 
*/ - private void dumpMeta(byte[] tableName) throws IOException { + private void dumpMeta(TableName tableName) throws IOException { List metaRows = TEST_UTIL.getMetaTableRows(tableName); for (byte[] row : metaRows) { LOG.info(Bytes.toString(row)); @@ -263,7 +271,7 @@ public class TestHBaseFsck { byte[] startKey, byte[] endKey, boolean unassign, boolean metaRow, boolean hdfs, boolean regionInfoOnly) throws IOException, InterruptedException { LOG.info("** Before delete:"); - dumpMeta(htd.getName()); + dumpMeta(htd.getTableName()); Map hris = tbl.getRegionLocations(); for (Entry e: hris.entrySet()) { @@ -284,7 +292,8 @@ public class TestHBaseFsck { LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString()); Path rootDir = FSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); - Path p = new Path(rootDir + "/" + htd.getNameAsString(), hri.getEncodedName()); + Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), + hri.getEncodedName()); Path hriPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE); fs.delete(hriPath, true); } @@ -293,7 +302,8 @@ public class TestHBaseFsck { LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString()); Path rootDir = FSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); - Path p = new Path(rootDir + "/" + htd.getNameAsString(), hri.getEncodedName()); + Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), + hri.getEncodedName()); HBaseFsck.debugLsr(conf, p); boolean success = fs.delete(p, true); LOG.info("Deleted " + p + " sucessfully? " + success); @@ -301,7 +311,7 @@ public class TestHBaseFsck { } if (metaRow) { - HTable meta = new HTable(conf, HConstants.META_TABLE_NAME, executorService); + HTable meta = new HTable(conf, TableName.META_TABLE_NAME, executorService); Delete delete = new Delete(deleteRow); meta.delete(delete); } @@ -309,9 +319,9 @@ public class TestHBaseFsck { LOG.info(hri.toString() + hsa.toString()); } - TEST_UTIL.getMetaTableRows(htd.getName()); + TEST_UTIL.getMetaTableRows(htd.getTableName()); LOG.info("*** After delete:"); - dumpMeta(htd.getName()); + dumpMeta(htd.getTableName()); } /** @@ -321,12 +331,12 @@ public class TestHBaseFsck { * @throws InterruptedException * @throws KeeperException */ - HTable setupTable(String tablename) throws Exception { + HTable setupTable(TableName tablename) throws Exception { HTableDescriptor desc = new HTableDescriptor(tablename); HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM)); desc.addFamily(hcd); // If a table has no CF's it doesn't get checked TEST_UTIL.getHBaseAdmin().createTable(desc, SPLITS); - tbl = new HTable(TEST_UTIL.getConfiguration(), Bytes.toBytes(tablename), executorService); + tbl = new HTable(TEST_UTIL.getConfiguration(), tablename, executorService); List puts = new ArrayList(); for (byte[] row : ROWKEYS) { @@ -358,15 +368,14 @@ public class TestHBaseFsck { * @param tablename * @throws IOException */ - void deleteTable(String tablename) throws IOException { + void deleteTable(TableName tablename) throws IOException { HBaseAdmin admin = new HBaseAdmin(conf); admin.getConnection().clearRegionCache(); - byte[] tbytes = Bytes.toBytes(tablename); - admin.disableTableAsync(tbytes); + admin.disableTableAsync(tablename); long totalWait = 0; long maxWait = 30*1000; long sleepTime = 250; - while (!admin.isTableDisabled(tbytes)) { + while (!admin.isTableDisabled(tablename)) { try { Thread.sleep(sleepTime); totalWait += sleepTime; @@ -378,7 +387,7 @@ public class TestHBaseFsck { 
fail("Interrupted when trying to disable table " + tablename); } } - admin.deleteTable(tbytes); + admin.deleteTable(tablename); } /** @@ -387,7 +396,7 @@ public class TestHBaseFsck { @Test public void testHBaseFsckClean() throws Exception { assertNoErrors(doFsck(conf, false)); - String table = "tableClean"; + TableName table = TableName.valueOf("tableClean"); try { HBaseFsck hbck = doFsck(conf, false); assertNoErrors(hbck); @@ -410,7 +419,8 @@ public class TestHBaseFsck { */ @Test public void testHbckThreadpooling() throws Exception { - String table = "tableDupeStartKey"; + TableName table = + TableName.valueOf("tableDupeStartKey"); try { // Create table with 4 regions setupTable(table); @@ -428,15 +438,15 @@ public class TestHBaseFsck { @Test public void testHbckFixOrphanTable() throws Exception { - String table = "tableInfo"; + TableName table = TableName.valueOf("tableInfo"); FileSystem fs = null; Path tableinfo = null; try { setupTable(table); HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); - Path hbaseTableDir = HTableDescriptor.getTableDir( - FSUtils.getRootDir(conf), Bytes.toBytes(table)); + Path hbaseTableDir = FSUtils.getTableDir( + FSUtils.getRootDir(conf), table); fs = hbaseTableDir.getFileSystem(conf); FileStatus status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir); tableinfo = status.getPath(); @@ -453,21 +463,21 @@ public class TestHBaseFsck { status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir); assertNotNull(status); - HTableDescriptor htd = admin.getTableDescriptor(table.getBytes()); + HTableDescriptor htd = admin.getTableDescriptor(table); htd.setValue("NOT_DEFAULT", "true"); admin.disableTable(table); - admin.modifyTable(table.getBytes(), htd); + admin.modifyTable(table, htd); admin.enableTable(table); fs.delete(status.getPath(), true); // fix OrphanTable with cache - htd = admin.getTableDescriptor(table.getBytes()); // warms up cached htd on master + htd = admin.getTableDescriptor(table); // warms up cached htd on master hbck = doFsck(conf, true); assertNoErrors(hbck); status = null; status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir); assertNotNull(status); - htd = admin.getTableDescriptor(table.getBytes()); + htd = admin.getTableDescriptor(table); assertEquals(htd.getValue("NOT_DEFAULT"), "true"); } finally { fs.rename(new Path("/.tableinfo"), tableinfo); @@ -481,7 +491,8 @@ public class TestHBaseFsck { */ @Test public void testDupeStartKey() throws Exception { - String table = "tableDupeStartKey"; + TableName table = + TableName.valueOf("tableDupeStartKey"); try { setupTable(table); assertNoErrors(doFsck(conf, false)); @@ -557,7 +568,8 @@ public class TestHBaseFsck { */ @Test public void testDupeRegion() throws Exception { - String table = "tableDupeRegion"; + TableName table = + TableName.valueOf("tableDupeRegion"); try { setupTable(table); assertNoErrors(doFsck(conf, false)); @@ -609,7 +621,8 @@ public class TestHBaseFsck { */ @Test public void testDegenerateRegions() throws Exception { - String table = "tableDegenerateRegions"; + TableName table = + TableName.valueOf("tableDegenerateRegions"); try { setupTable(table); assertNoErrors(doFsck(conf,false)); @@ -649,7 +662,8 @@ public class TestHBaseFsck { */ @Test public void testContainedRegionOverlap() throws Exception { - String table = "tableContainedRegionOverlap"; + TableName table = + TableName.valueOf("tableContainedRegionOverlap"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); @@ -690,7 +704,8 @@ public class TestHBaseFsck { */ @Test public void 
testSidelineOverlapRegion() throws Exception { - String table = "testSidelineOverlapRegion"; + TableName table = + TableName.valueOf("testSidelineOverlapRegion"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); @@ -742,7 +757,7 @@ public class TestHBaseFsck { assertNotNull(regionName); assertNotNull(serverName); - HTable meta = new HTable(conf, HConstants.META_TABLE_NAME, executorService); + HTable meta = new HTable(conf, TableName.META_TABLE_NAME, executorService); Put put = new Put(regionName); put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes.toBytes(serverName.getHostAndPort())); @@ -780,7 +795,8 @@ public class TestHBaseFsck { */ @Test public void testOverlapAndOrphan() throws Exception { - String table = "tableOverlapAndOrphan"; + TableName table = + TableName.valueOf("tableOverlapAndOrphan"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); @@ -824,7 +840,8 @@ public class TestHBaseFsck { */ @Test public void testCoveredStartKey() throws Exception { - String table = "tableCoveredStartKey"; + TableName table = + TableName.valueOf("tableCoveredStartKey"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); @@ -864,7 +881,8 @@ public class TestHBaseFsck { */ @Test public void testRegionHole() throws Exception { - String table = "tableRegionHole"; + TableName table = + TableName.valueOf("tableRegionHole"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); @@ -898,7 +916,8 @@ public class TestHBaseFsck { */ @Test public void testHDFSRegioninfoMissing() throws Exception { - String table = "tableHDFSRegioininfoMissing"; + TableName table = + TableName.valueOf("tableHDFSRegioininfoMissing"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); @@ -934,7 +953,8 @@ public class TestHBaseFsck { */ @Test public void testNotInMetaOrDeployedHole() throws Exception { - String table = "tableNotInMetaOrDeployedHole"; + TableName table = + TableName.valueOf("tableNotInMetaOrDeployedHole"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); @@ -968,7 +988,8 @@ public class TestHBaseFsck { */ @Test public void testNotInMetaHole() throws Exception { - String table = "tableNotInMetaHole"; + TableName table = + TableName.valueOf("tableNotInMetaHole"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); @@ -1003,13 +1024,14 @@ public class TestHBaseFsck { */ @Test public void testNotInHdfs() throws Exception { - String table = "tableNotInHdfs"; + TableName table = + TableName.valueOf("tableNotInHdfs"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); // make sure data in regions, if in hlog only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table); + TEST_UTIL.getHBaseAdmin().flush(table.getName()); // Mess it up by leaving a hole in the hdfs data deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), @@ -1037,12 +1059,12 @@ public class TestHBaseFsck { */ @Test public void testNoHdfsTable() throws Exception { - String table = "NoHdfsTable"; + TableName table = TableName.valueOf("NoHdfsTable"); setupTable(table); assertEquals(ROWKEYS.length, countRows()); // make sure data in regions, if in hlog only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table); + TEST_UTIL.getHBaseAdmin().flush(table.getName()); // Mess it up by leaving a giant hole in meta deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes(""), @@ -1096,7 +1118,8 @@ public class TestHBaseFsck { */ @Test public void testRegionShouldNotBeDeployed() 
throws Exception { - String table = "tableRegionShouldNotBeDeployed"; + TableName table = + TableName.valueOf("tableRegionShouldNotBeDeployed"); try { LOG.info("Starting testRegionShouldNotBeDeployed."); MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); @@ -1105,7 +1128,7 @@ public class TestHBaseFsck { byte[][] SPLIT_KEYS = new byte[][] { new byte[0], Bytes.toBytes("aaa"), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd") }; - HTableDescriptor htdDisabled = new HTableDescriptor(Bytes.toBytes(table)); + HTableDescriptor htdDisabled = new HTableDescriptor(table); htdDisabled.addFamily(new HColumnDescriptor(FAM)); // Write the .tableinfo @@ -1156,19 +1179,21 @@ public class TestHBaseFsck { */ @Test public void testFixByTable() throws Exception { - String table1 = "testFixByTable1"; - String table2 = "testFixByTable2"; + TableName table1 = + TableName.valueOf("testFixByTable1"); + TableName table2 = + TableName.valueOf("testFixByTable2"); try { setupTable(table1); // make sure data in regions, if in hlog only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table1); + TEST_UTIL.getHBaseAdmin().flush(table1.getName()); // Mess them up by leaving a hole in the hdfs data deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false, false, true); // don't rm meta setupTable(table2); // make sure data in regions, if in hlog only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table2); + TEST_UTIL.getHBaseAdmin().flush(table2.getName()); // Mess them up by leaving a hole in the hdfs data deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false, false, true); // don't rm meta @@ -1200,14 +1225,15 @@ public class TestHBaseFsck { */ @Test public void testLingeringSplitParent() throws Exception { - String table = "testLingeringSplitParent"; + TableName table = + TableName.valueOf("testLingeringSplitParent"); HTable meta = null; try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); // make sure data in regions, if in hlog only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table); + TEST_UTIL.getHBaseAdmin().flush(table.getName()); HRegionLocation location = tbl.getRegionLocation("B"); // Delete one region from meta, but not hdfs, unassign it. @@ -1215,12 +1241,13 @@ public class TestHBaseFsck { Bytes.toBytes("C"), true, true, false); // Create a new meta entry to fake it as a split parent. 
- meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getName(), executorService); + meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName(), + executorService); HRegionInfo hri = location.getRegionInfo(); - HRegionInfo a = new HRegionInfo(tbl.getTableName(), + HRegionInfo a = new HRegionInfo(tbl.getName(), Bytes.toBytes("B"), Bytes.toBytes("BM")); - HRegionInfo b = new HRegionInfo(tbl.getTableName(), + HRegionInfo b = new HRegionInfo(tbl.getName(), Bytes.toBytes("BM"), Bytes.toBytes("C")); hri.setOffline(true); @@ -1228,7 +1255,7 @@ public class TestHBaseFsck { MetaEditor.addRegionToMeta(meta, hri, a, b); meta.flushCommits(); - TEST_UTIL.getHBaseAdmin().flush(HConstants.META_TABLE_NAME); + TEST_UTIL.getHBaseAdmin().flush(TableName.META_TABLE_NAME.getName()); HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] { @@ -1258,7 +1285,7 @@ public class TestHBaseFsck { HConstants.SPLITA_QUALIFIER).isEmpty()); assertTrue(result.getColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER).isEmpty()); - TEST_UTIL.getHBaseAdmin().flush(HConstants.META_TABLE_NAME); + TEST_UTIL.getHBaseAdmin().flush(TableName.META_TABLE_NAME.getName()); // fix other issues doFsck(conf, true); @@ -1278,17 +1305,18 @@ public class TestHBaseFsck { */ @Test public void testValidLingeringSplitParent() throws Exception { - String table = "testLingeringSplitParent"; + TableName table = + TableName.valueOf("testLingeringSplitParent"); HTable meta = null; try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); // make sure data in regions, if in hlog only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table); + TEST_UTIL.getHBaseAdmin().flush(table.getName()); HRegionLocation location = tbl.getRegionLocation("B"); - meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getName()); + meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName()); HRegionInfo hri = location.getRegionInfo(); // do a regular split @@ -1327,17 +1355,18 @@ public class TestHBaseFsck { */ @Test public void testSplitDaughtersNotInMeta() throws Exception { - String table = "testSplitdaughtersNotInMeta"; + TableName table = + TableName.valueOf("testSplitdaughtersNotInMeta"); HTable meta = null; try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); // make sure data in regions, if in hlog only there is no data loss - TEST_UTIL.getHBaseAdmin().flush(table); + TEST_UTIL.getHBaseAdmin().flush(table.getName()); HRegionLocation location = tbl.getRegionLocation("B"); - meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getName()); + meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName()); HRegionInfo hri = location.getRegionInfo(); // do a regular split @@ -1390,7 +1419,8 @@ public class TestHBaseFsck { */ @Test(timeout=120000) public void testMissingFirstRegion() throws Exception { - String table = "testMissingFirstRegion"; + TableName table = + TableName.valueOf("testMissingFirstRegion"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); @@ -1418,7 +1448,8 @@ public class TestHBaseFsck { */ @Test(timeout=120000) public void testMissingLastRegion() throws Exception { - String table = "testMissingLastRegion"; + TableName table = + TableName.valueOf("testMissingLastRegion"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); @@ -1445,7 +1476,8 @@ public class TestHBaseFsck { */ @Test public void testFixAssignmentsAndNoHdfsChecking() throws Exception { - String table = "testFixAssignmentsAndNoHdfsChecking"; + 
TableName table = + TableName.valueOf("testFixAssignmentsAndNoHdfsChecking"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); @@ -1494,7 +1526,8 @@ public class TestHBaseFsck { */ @Test public void testFixMetaNotWorkingWithNoHdfsChecking() throws Exception { - String table = "testFixMetaNotWorkingWithNoHdfsChecking"; + TableName table = + TableName.valueOf("testFixMetaNotWorkingWithNoHdfsChecking"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); @@ -1541,7 +1574,8 @@ public class TestHBaseFsck { */ @Test public void testFixHdfsHolesNotWorkingWithNoHdfsChecking() throws Exception { - String table = "testFixHdfsHolesNotWorkingWithNoHdfsChecking"; + TableName table = + TableName.valueOf("testFixHdfsHolesNotWorkingWithNoHdfsChecking"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); @@ -1604,8 +1638,8 @@ public class TestHBaseFsck { * @return Path of a flushed hfile. * @throws IOException */ - Path getFlushedHFile(FileSystem fs, String table) throws IOException { - Path tableDir= FSUtils.getTablePath(FSUtils.getRootDir(conf), table); + Path getFlushedHFile(FileSystem fs, TableName table) throws IOException { + Path tableDir= FSUtils.getTableDir(FSUtils.getRootDir(conf), table); Path regionDir = FSUtils.getRegionDirs(fs, tableDir).get(0); Path famDir = new Path(regionDir, FAM_STR); @@ -1628,11 +1662,11 @@ public class TestHBaseFsck { */ @Test(timeout=120000) public void testQuarantineCorruptHFile() throws Exception { - String table = name.getMethodName(); + TableName table = TableName.valueOf(name.getMethodName()); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); - TEST_UTIL.getHBaseAdmin().flush(table); // flush is async. + TEST_UTIL.getHBaseAdmin().flush(table.getName()); // flush is async. FileSystem fs = FileSystem.get(conf); Path hfile = getFlushedHFile(fs, table); @@ -1666,17 +1700,18 @@ public class TestHBaseFsck { /** * Test that use this should have a timeout, because this method could potentially wait forever. */ - private void doQuarantineTest(String table, HBaseFsck hbck, int check, int corrupt, int fail, - int quar, int missing) throws Exception { + private void doQuarantineTest(TableName table, HBaseFsck hbck, int check, + int corrupt, int fail, int quar, int missing) throws Exception { try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); - TEST_UTIL.getHBaseAdmin().flush(table); // flush is async. + TEST_UTIL.getHBaseAdmin().flush(table.getName()); // flush is async. // Mess it up by leaving a hole in the assignment, meta, and hdfs data TEST_UTIL.getHBaseAdmin().disableTable(table); - String[] args = {"-sidelineCorruptHFiles", "-repairHoles", "-ignorePreCheckPermission", table}; + String[] args = {"-sidelineCorruptHFiles", "-repairHoles", "-ignorePreCheckPermission", + table.getNameAsString()}; ExecutorService exec = new ScheduledThreadPoolExecutor(10); HBaseFsck res = hbck.exec(exec, args); @@ -1709,7 +1744,7 @@ public class TestHBaseFsck { */ @Test(timeout=120000) public void testQuarantineMissingHFile() throws Exception { - String table = name.getMethodName(); + TableName table = TableName.valueOf(name.getMethodName()); ExecutorService exec = new ScheduledThreadPoolExecutor(10); // inject a fault in the hfcc created. final FileSystem fs = FileSystem.get(conf); @@ -1738,7 +1773,7 @@ public class TestHBaseFsck { // files in a column family on initial creation -- as suggested by Matteo. 
@Ignore @Test(timeout=120000) public void testQuarantineMissingFamdir() throws Exception { - String table = name.getMethodName(); + TableName table = TableName.valueOf(name.getMethodName()); ExecutorService exec = new ScheduledThreadPoolExecutor(10); // inject a fault in the hfcc created. final FileSystem fs = FileSystem.get(conf); @@ -1765,7 +1800,7 @@ public class TestHBaseFsck { */ @Test(timeout=120000) public void testQuarantineMissingRegionDir() throws Exception { - String table = name.getMethodName(); + TableName table = TableName.valueOf(name.getMethodName()); ExecutorService exec = new ScheduledThreadPoolExecutor(10); // inject a fault in the hfcc created. final FileSystem fs = FileSystem.get(conf); @@ -1791,14 +1826,15 @@ public class TestHBaseFsck { */ @Test public void testLingeringReferenceFile() throws Exception { - String table = "testLingeringReferenceFile"; + TableName table = + TableName.valueOf("testLingeringReferenceFile"); try { setupTable(table); assertEquals(ROWKEYS.length, countRows()); // Mess it up by creating a fake reference file FileSystem fs = FileSystem.get(conf); - Path tableDir= FSUtils.getTablePath(FSUtils.getRootDir(conf), table); + Path tableDir= FSUtils.getTableDir(FSUtils.getRootDir(conf), table); Path regionDir = FSUtils.getRegionDirs(fs, tableDir).get(0); Path famDir = new Path(regionDir, FAM_STR); Path fakeReferenceFile = new Path(famDir, "fbce357483ceea.12144538"); @@ -1820,17 +1856,32 @@ public class TestHBaseFsck { */ @Test public void testMissingRegionInfoQualifier() throws Exception { - String table = "testMissingRegionInfoQualifier"; + TableName table = + TableName.valueOf("testMissingRegionInfoQualifier"); try { setupTable(table); // Mess it up by removing the RegionInfo for one region. - HTable meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getName()); - ResultScanner scanner = meta.getScanner(new Scan()); - Result result = scanner.next(); - Delete delete = new Delete (result.getRow()); - delete.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - meta.delete(delete); + final List deletes = new LinkedList(); + HTable meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName()); + MetaScanner.metaScan(conf, new MetaScanner.MetaScannerVisitor() { + + @Override + public boolean processRow(Result rowResult) throws IOException { + if(!HTableDescriptor.isSystemTable(MetaScanner.getHRegionInfo(rowResult) + .getTableName())) { + Delete delete = new Delete(rowResult.getRow()); + delete.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + deletes.add(delete); + } + return true; + } + + @Override + public void close() throws IOException { + } + }); + meta.delete(deletes); // Mess it up by creating a fake META entry with no associated RegionInfo meta.put(new Put(Bytes.toBytes(table + ",,1361911384013.810e28f59a57da91c66")).add( @@ -1950,7 +2001,8 @@ public class TestHBaseFsck { // obtain one lock final TableLockManager tableLockManager = TableLockManager.createTableLockManager(conf, TEST_UTIL.getZooKeeperWatcher(), mockName); - TableLock writeLock = tableLockManager.writeLock(Bytes.toBytes("foo"), "testCheckTableLocks"); + TableLock writeLock = tableLockManager.writeLock(TableName.valueOf("foo"), + "testCheckTableLocks"); writeLock.acquire(); hbck = doFsck(conf, false); assertNoErrors(hbck); // should not have expired, no problems @@ -1964,7 +2016,8 @@ public class TestHBaseFsck { final CountDownLatch latch = new CountDownLatch(1); new Thread() { public void run() { - TableLock readLock = 
tableLockManager.writeLock(Bytes.toBytes("foo"), "testCheckTableLocks"); + TableLock readLock = tableLockManager.writeLock(TableName.valueOf("foo"), + "testCheckTableLocks"); try { latch.countDown(); readLock.acquire(); @@ -1998,7 +2051,8 @@ public class TestHBaseFsck { assertNoErrors(hbck); // ensure that locks are deleted - writeLock = tableLockManager.writeLock(Bytes.toBytes("foo"), "should acquire without blocking"); + writeLock = tableLockManager.writeLock(TableName.valueOf("foo"), + "should acquire without blocking"); writeLock.acquire(); // this should not block. writeLock.release(); // release for clean state } @@ -2022,7 +2076,7 @@ public class TestHBaseFsck { private void deleteMetaRegion(Configuration conf, boolean unassign, boolean hdfs, boolean regionInfoOnly) throws IOException, InterruptedException { HConnection connection = HConnectionManager.getConnection(conf); - HRegionLocation metaLocation = connection.locateRegion(HConstants.META_TABLE_NAME, + HRegionLocation metaLocation = connection.locateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW); ServerName hsa = new ServerName(metaLocation.getHostnamePort(), 0L); HRegionInfo hri = metaLocation.getRegionInfo(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java index ed071ce..86f811d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.util; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo; @@ -34,15 +35,17 @@ import org.junit.experimental.categories.Category; @Category(SmallTests.class) public class TestHBaseFsckComparator { - byte[] table = Bytes.toBytes("table1"); - byte[] table2 = Bytes.toBytes("table2"); + TableName table = + TableName.valueOf("table1"); + TableName table2 = + TableName.valueOf("table2"); byte[] keyStart = Bytes.toBytes(""); byte[] keyA = Bytes.toBytes("A"); byte[] keyB = Bytes.toBytes("B"); byte[] keyC = Bytes.toBytes("C"); byte[] keyEnd = Bytes.toBytes(""); - static HbckInfo genHbckInfo(byte[] table, byte[] start, byte[] end, int time) { + static HbckInfo genHbckInfo(TableName table, byte[] start, byte[] end, int time) { return new HbckInfo(new MetaEntry(new HRegionInfo(table, start, end), null, time)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java index f280d54..ab7d2a3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java @@ -21,24 +21,24 @@ import static org.junit.Assert.*; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.SmallTests; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.mockito.Mockito; + +import 
java.io.IOException; /** * Test that the utility works as expected */ @Category(SmallTests.class) public class TestHFileArchiveUtil { - + private Path rootDir = new Path("./"); @Test public void testGetTableArchivePath() { - assertNotNull(HFileArchiveUtil.getTableArchivePath(new Path("table"))); - assertNotNull(HFileArchiveUtil.getTableArchivePath(new Path("root", new Path("table")))); + assertNotNull(HFileArchiveUtil.getTableArchivePath(rootDir, + TableName.valueOf("table"))); } @Test @@ -50,19 +50,19 @@ public class TestHFileArchiveUtil { @Test public void testRegionArchiveDir() { - Path tableDir = new Path("table"); Path regionDir = new Path("region"); - assertNotNull(HFileArchiveUtil.getRegionArchiveDir(tableDir, regionDir)); + assertNotNull(HFileArchiveUtil.getRegionArchiveDir(rootDir, + TableName.valueOf("table"), regionDir)); } @Test - public void testGetStoreArchivePath(){ + public void testGetStoreArchivePath() throws IOException { byte[] family = Bytes.toBytes("Family"); - Path tabledir = new Path("table"); - HRegionInfo region = new HRegionInfo(Bytes.toBytes("table")); - Configuration conf = null; - assertNotNull(HFileArchiveUtil.getStoreArchivePath(conf, region, tabledir, family)); - conf = new Configuration(); + Path tabledir = FSUtils.getTableDir(rootDir, + TableName.valueOf("table")); + HRegionInfo region = new HRegionInfo(TableName.valueOf("table")); + Configuration conf = new Configuration(); + FSUtils.setRootDir(conf, new Path("root")); assertNotNull(HFileArchiveUtil.getStoreArchivePath(conf, region, tabledir, family)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java index 7b192c3..8ee2349 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java @@ -66,7 +66,7 @@ public class TestMergeTable { */ @Test (timeout=300000) public void testMergeTable() throws Exception { // Table we are manually creating offline. - HTableDescriptor desc = new HTableDescriptor(Bytes.toBytes("test")); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("test"))); desc.addFamily(new HColumnDescriptor(COLUMN_NAME)); // Set maximum regionsize down. 
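As a side note before the next hunk: the TestHFileArchiveUtil changes above move the archive- and table-directory helpers onto TableName as well. Below is a minimal, self-contained sketch (not part of the patch) of those call shapes; the root path and the "demo" table name are placeholders, and only the FSUtils/HFileArchiveUtil signatures exercised by the test hunks above are assumed.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;

public class ArchivePathSketch {
  public static void main(String[] args) throws IOException {
    Path rootDir = new Path("./");                       // placeholder root, as in the test
    TableName table = TableName.valueOf("demo");         // hypothetical table name

    // Table and archive directories are now resolved from a TableName,
    // not from a bare String/byte[] path component.
    Path tableDir = FSUtils.getTableDir(rootDir, table);
    Path tableArchive = HFileArchiveUtil.getTableArchivePath(rootDir, table);

    // Store-level archive paths additionally need the root dir registered on the Configuration.
    Configuration conf = new Configuration();
    FSUtils.setRootDir(conf, rootDir);
    HRegionInfo region = new HRegionInfo(table);
    Path storeArchive = HFileArchiveUtil.getStoreArchivePath(
        conf, region, tableDir, Bytes.toBytes("Family"));

    System.out.println(tableDir + " | " + tableArchive + " | " + storeArchive);
  }
}
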
@@ -114,14 +114,14 @@ public class TestMergeTable { CatalogTracker ct = new CatalogTracker(c); ct.start(); List originalTableRegions = - MetaReader.getTableRegions(ct, desc.getName()); + MetaReader.getTableRegions(ct, desc.getTableName()); LOG.info("originalTableRegions size=" + originalTableRegions.size() + "; " + originalTableRegions); HBaseAdmin admin = new HBaseAdmin(c); - admin.disableTable(desc.getName()); - HMerge.merge(c, FileSystem.get(c), desc.getName()); + admin.disableTable(desc.getTableName()); + HMerge.merge(c, FileSystem.get(c), desc.getTableName()); List postMergeTableRegions = - MetaReader.getTableRegions(ct, desc.getName()); + MetaReader.getTableRegions(ct, desc.getTableName()); LOG.info("postMergeTableRegions size=" + postMergeTableRegions.size() + "; " + postMergeTableRegions); assertTrue("originalTableRegions=" + originalTableRegions.size() + @@ -137,7 +137,7 @@ public class TestMergeTable { private HRegion createRegion(final HTableDescriptor desc, byte [] startKey, byte [] endKey, int firstRow, int nrows, Path rootdir) throws IOException { - HRegionInfo hri = new HRegionInfo(desc.getName(), startKey, endKey); + HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey); HRegion region = HRegion.createHRegion(hri, rootdir, UTIL.getConfiguration(), desc); LOG.info("Created region " + region.getRegionNameAsString()); for(int i = firstRow; i < firstRow + nrows; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java index c234319..9e780b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; -import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.util.ToolRunner; import org.junit.experimental.categories.Category; @@ -68,28 +67,28 @@ public class TestMergeTool extends HBaseTestCase { this.conf.set("hbase.hstore.compactionThreshold", "2"); // Create table description - this.desc = new HTableDescriptor("TestMergeTool"); + this.desc = new HTableDescriptor(TableName.valueOf("TestMergeTool")); this.desc.addFamily(new HColumnDescriptor(FAMILY)); /* * Create the HRegionInfos for the regions. 
*/ // Region 0 will contain the key range [row_0200,row_0300) - sourceRegions[0] = new HRegionInfo(this.desc.getName(), + sourceRegions[0] = new HRegionInfo(this.desc.getTableName(), Bytes.toBytes("row_0200"), Bytes.toBytes("row_0300")); // Region 1 will contain the key range [row_0250,row_0400) and overlaps // with Region 0 sourceRegions[1] = - new HRegionInfo(this.desc.getName(), + new HRegionInfo(this.desc.getTableName(), Bytes.toBytes("row_0250"), Bytes.toBytes("row_0400")); // Region 2 will contain the key range [row_0100,row_0200) and is adjacent // to Region 0 or the region resulting from the merge of Regions 0 and 1 sourceRegions[2] = - new HRegionInfo(this.desc.getName(), + new HRegionInfo(this.desc.getTableName(), Bytes.toBytes("row_0100"), Bytes.toBytes("row_0200")); @@ -97,13 +96,13 @@ public class TestMergeTool extends HBaseTestCase { // adjacent to any of Regions 0, 1, 2 or the merged result of any or all // of those regions sourceRegions[3] = - new HRegionInfo(this.desc.getName(), + new HRegionInfo(this.desc.getTableName(), Bytes.toBytes("row_0500"), Bytes.toBytes("row_0600")); // Region 4 will have empty start and end keys and overlaps all regions. sourceRegions[4] = - new HRegionInfo(this.desc.getName(), + new HRegionInfo(this.desc.getTableName(), HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY); @@ -195,7 +194,7 @@ public class TestMergeTool extends HBaseTestCase { LOG.info(msg); LOG.info("fs2=" + this.conf.get("fs.defaultFS")); int errCode = ToolRunner.run(this.conf, merger, - new String[] {this.desc.getNameAsString(), regionName1, regionName2} + new String[] {this.desc.getTableName().getNameAsString(), regionName1, regionName2} ); assertTrue("'" + msg + "' failed with errCode " + errCode, errCode == 0); HRegionInfo mergedInfo = merger.getMergedHRegionInfo(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java index 94edb26..aed7edd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMiniClusterLoadSequential.java @@ -26,6 +26,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -55,7 +56,8 @@ public class TestMiniClusterLoadSequential { private static final Log LOG = LogFactory.getLog( TestMiniClusterLoadSequential.class); - protected static final byte[] TABLE = Bytes.toBytes("load_test_tbl"); + protected static final TableName TABLE = + TableName.valueOf("load_test_tbl"); protected static final byte[] CF = Bytes.toBytes("load_test_cf"); protected static final int NUM_THREADS = 8; protected static final int NUM_RS = 2; @@ -108,13 +110,13 @@ public class TestMiniClusterLoadSequential { } protected MultiThreadedReader prepareReaderThreads(LoadTestDataGenerator dataGen, - Configuration conf, byte[] tableName, double verifyPercent) { + Configuration conf, TableName tableName, double verifyPercent) { MultiThreadedReader reader = new MultiThreadedReader(dataGen, conf, tableName, verifyPercent); return reader; } protected MultiThreadedWriter prepareWriterThreads(LoadTestDataGenerator dataGen, - Configuration conf, 
byte[] tableName) { + Configuration conf, TableName tableName) { MultiThreadedWriter writer = new MultiThreadedWriter(dataGen, conf, tableName); writer.setMultiPut(isMultiPut); return writer; @@ -141,7 +143,7 @@ public class TestMiniClusterLoadSequential { protected void createPreSplitLoadTestTable(HTableDescriptor htd, HColumnDescriptor hcd) throws IOException { HBaseTestingUtility.createPreSplitLoadTestTable(conf, htd, hcd); - TEST_UTIL.waitUntilAllRegionsAssigned(htd.getName()); + TEST_UTIL.waitUntilAllRegionsAssigned(htd.getTableName()); } protected void prepareForLoadTest() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestTableName.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestTableName.java new file mode 100644 index 0000000..9a56b76 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestTableName.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import org.apache.hadoop.hbase.TableName; +import org.junit.rules.TestWatcher; +import org.junit.runner.Description; + +/** + * Returns a {@code byte[]} containing the name of the currently running test method. 
+ */ +public class TestTableName extends TestWatcher { + private TableName tableName; + + /** + * Invoked when a test is about to start + */ + @Override + protected void starting(Description description) { + tableName = TableName.valueOf(description.getMethodName()); + } + + public TableName getTableName() { + return tableName; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java index e4e1309..1f6ec70 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java @@ -20,12 +20,14 @@ package org.apache.hadoop.hbase.util.hbck; import static org.junit.Assert.assertEquals; import java.util.ArrayList; -import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; +import com.google.common.collect.Lists; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.HBaseFsck; import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE; @@ -37,14 +39,15 @@ public class HbckTestingUtil { } public static HBaseFsck doFsck( - Configuration conf, boolean fix, String table) throws Exception { + Configuration conf, boolean fix, TableName table) throws Exception { return doFsck(conf, fix, fix, fix, fix,fix, fix, fix, fix, fix, fix, table); } public static HBaseFsck doFsck(Configuration conf, boolean fixAssignments, boolean fixMeta, boolean fixHdfsHoles, boolean fixHdfsOverlaps, boolean fixHdfsOrphans, boolean fixTableOrphans, boolean fixVersionFile, - boolean fixReferenceFiles, boolean fixEmptyMetaRegionInfo, boolean fixTableLocks, String table) throws Exception { + boolean fixReferenceFiles, boolean fixEmptyMetaRegionInfo, boolean fixTableLocks, + TableName table) throws Exception { HBaseFsck fsck = new HBaseFsck(conf, exec); fsck.connect(); fsck.setDisplayFullReport(); // i.e. 
-details @@ -73,8 +76,8 @@ public class HbckTestingUtil { * @return * @throws Exception */ - public static HBaseFsck doHFileQuarantine(Configuration conf, String table) throws Exception { - String[] args = {"-sidelineCorruptHFiles", "-ignorePreCheckPermission", table}; + public static HBaseFsck doHFileQuarantine(Configuration conf, TableName table) throws Exception { + String[] args = {"-sidelineCorruptHFiles", "-ignorePreCheckPermission", table.getNameAsString()}; HBaseFsck hbck = new HBaseFsck(conf, exec); hbck.exec(exec, args); return hbck; @@ -87,6 +90,9 @@ public class HbckTestingUtil { public static void assertErrors(HBaseFsck fsck, ERROR_CODE[] expectedErrors) { List errs = fsck.getErrors().getErrorList(); - assertEquals(Arrays.asList(expectedErrors), errs); + Collections.sort(errs); + List expErrs = Lists.newArrayList(expectedErrors); + Collections.sort(expErrs); + assertEquals(expErrs, errs); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java index 1c5fa70..d1b937f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java @@ -31,12 +31,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.LargeTests; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.MetaEditor; import org.apache.hadoop.hbase.client.Delete; @@ -83,7 +84,7 @@ public class OfflineMetaRebuildTestCore { private final static String TABLE_BASE = "tableMetaRebuild"; private static int tableIdx = 0; - protected String table = "tableMetaRebuild"; + protected TableName table = TableName.valueOf("tableMetaRebuild"); @Before public void setUpBefore() throws Exception { @@ -94,11 +95,11 @@ public class OfflineMetaRebuildTestCore { assertEquals(0, TEST_UTIL.getHBaseAdmin().listTables().length); // setup the table - table = TABLE_BASE + "-" + tableIdx; + table = TableName.valueOf(TABLE_BASE + "-" + tableIdx); tableIdx++; htbl = setupTable(table); populateTable(htbl); - assertEquals(4, scanMeta()); + assertEquals(5, scanMeta()); LOG.info("Table " + table + " has " + tableRowCount(conf, table) + " entries."); assertEquals(16, tableRowCount(conf, table)); @@ -119,7 +120,7 @@ public class OfflineMetaRebuildTestCore { * @throws InterruptedException * @throws KeeperException */ - private HTable setupTable(String tablename) throws Exception { + private HTable setupTable(TableName tablename) throws Exception { HTableDescriptor desc = new HTableDescriptor(tablename); HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM)); desc.addFamily(hcd); // If a table has no CF's it doesn't get checked @@ -128,7 +129,7 @@ public class OfflineMetaRebuildTestCore { } private void dumpMeta(HTableDescriptor htd) throws IOException { - List metaRows = TEST_UTIL.getMetaTableRows(htd.getName()); + List metaRows = 
TEST_UTIL.getMetaTableRows(htd.getTableName()); for (byte[] row : metaRows) { LOG.info(Bytes.toString(row)); } @@ -184,11 +185,11 @@ public class OfflineMetaRebuildTestCore { LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString()); Path rootDir = FSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); - Path p = new Path(rootDir + "/" + htd.getNameAsString(), + Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), hri.getEncodedName()); fs.delete(p, true); - HTable meta = new HTable(conf, HConstants.META_TABLE_NAME); + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); Delete delete = new Delete(deleteRow); meta.delete(delete); meta.close(); @@ -196,21 +197,21 @@ public class OfflineMetaRebuildTestCore { LOG.info(hri.toString() + hsa.toString()); } - TEST_UTIL.getMetaTableRows(htd.getName()); + TEST_UTIL.getMetaTableRows(htd.getTableName()); LOG.info("After delete:"); dumpMeta(htd); } protected HRegionInfo createRegion(Configuration conf, final HTable htbl, byte[] startKey, byte[] endKey) throws IOException { - HTable meta = new HTable(conf, HConstants.META_TABLE_NAME); + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); HTableDescriptor htd = htbl.getTableDescriptor(); - HRegionInfo hri = new HRegionInfo(htbl.getTableName(), startKey, endKey); + HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey); LOG.info("manually adding regioninfo and hdfs data: " + hri.toString()); Path rootDir = FSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); - Path p = new Path(rootDir + "/" + htd.getNameAsString(), + Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()), hri.getEncodedName()); fs.mkdirs(p); Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE); @@ -228,13 +229,19 @@ public class OfflineMetaRebuildTestCore { // Mess it up by blowing up meta. 
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); Scan s = new Scan(); - HTable meta = new HTable(conf, HConstants.META_TABLE_NAME); + HTable meta = new HTable(conf, TableName.META_TABLE_NAME); ResultScanner scanner = meta.getScanner(s); List dels = new ArrayList(); for (Result r : scanner) { - Delete d = new Delete(r.getRow()); - dels.add(d); - admin.unassign(r.getRow(), true); + HRegionInfo info = + HRegionInfo.getHRegionInfo(r); + if(info != null && + !info.getTableName().getNamespaceAsString() + .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { + Delete d = new Delete(r.getRow()); + dels.add(d); + admin.unassign(r.getRow(), true); + } } meta.delete(dels); meta.flushCommits(); @@ -248,7 +255,7 @@ public class OfflineMetaRebuildTestCore { * * @return # of rows in the specified table */ - protected int tableRowCount(Configuration conf, String table) + protected int tableRowCount(Configuration conf, TableName table) throws IOException { HTable t = new HTable(conf, table); Scan st = new Scan(); @@ -270,7 +277,7 @@ public class OfflineMetaRebuildTestCore { */ protected int scanMeta() throws IOException { int count = 0; - HTable meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getName()); + HTable meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName()); ResultScanner scanner = meta.getScanner(new Scan()); LOG.info("Table: " + Bytes.toString(meta.getTableName())); for (Result res : scanner) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java index b96fc33..1a85bc5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java @@ -45,12 +45,13 @@ public class TestOfflineMetaRebuildBase extends OfflineMetaRebuildTestCore { wipeOutMeta(); // is meta really messed up? - assertEquals(0, scanMeta()); + assertEquals(1, scanMeta()); assertErrors(doFsck(conf, false), - new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, - ERROR_CODE.NOT_IN_META_OR_DEPLOYED, }); + ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + ERROR_CODE.NOT_IN_META_OR_DEPLOYED}); // Note, would like to check # of tables, but this takes a while to time // out. @@ -74,7 +75,7 @@ public class TestOfflineMetaRebuildBase extends OfflineMetaRebuildTestCore { LOG.info("No more RIT in ZK, now doing final test verification"); // everything is good again. - assertEquals(4, scanMeta()); + assertEquals(5, scanMeta()); HTableDescriptor[] htbls = TEST_UTIL.getHBaseAdmin().listTables(); LOG.info("Tables present after restart: " + Arrays.toString(htbls)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java index 4b76bef..c2e8b73 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java @@ -52,10 +52,11 @@ public class TestOfflineMetaRebuildHole extends OfflineMetaRebuildTestCore { wipeOutMeta(); // is meta really messed up? 
- assertEquals(0, scanMeta()); + assertEquals(1, scanMeta()); assertErrors(doFsck(conf, false), new ERROR_CODE[] { - ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, - ERROR_CODE.NOT_IN_META_OR_DEPLOYED, }); + ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + ERROR_CODE.NOT_IN_META_OR_DEPLOYED}); // Note, would like to check # of tables, but this takes a while to time // out. @@ -76,9 +77,17 @@ public class TestOfflineMetaRebuildHole extends OfflineMetaRebuildTestCore { LOG.info("Waiting for no more RIT"); ZKAssign.blockUntilNoRIT(zkw); LOG.info("No more RIT in ZK, now doing final test verification"); + int tries = 60; + while(TEST_UTIL.getHBaseCluster() + .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition().size() > 0 && + tries-- > 0) { + LOG.info("Waiting for RIT: "+TEST_UTIL.getHBaseCluster() + .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition()); + Thread.sleep(1000); + } // Meta still messed up. - assertEquals(0, scanMeta()); + assertEquals(1, scanMeta()); HTableDescriptor[] htbls = TEST_UTIL.getHBaseAdmin().listTables(); LOG.info("Tables present after restart: " + Arrays.toString(htbls)); @@ -86,8 +95,9 @@ public class TestOfflineMetaRebuildHole extends OfflineMetaRebuildTestCore { // so the table is still present and this should be 1. assertEquals(1, htbls.length); assertErrors(doFsck(conf, false), new ERROR_CODE[] { - ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, - ERROR_CODE.NOT_IN_META_OR_DEPLOYED, }); + ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + ERROR_CODE.NOT_IN_META_OR_DEPLOYED}); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java index 660e34b..9cb30fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java @@ -54,12 +54,13 @@ public class TestOfflineMetaRebuildOverlap extends OfflineMetaRebuildTestCore { wipeOutMeta(); // is meta really messed up? - assertEquals(0, scanMeta()); + assertEquals(1, scanMeta()); assertErrors(doFsck(conf, false), - new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, - ERROR_CODE.NOT_IN_META_OR_DEPLOYED, }); + ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + ERROR_CODE.NOT_IN_META_OR_DEPLOYED}); // Note, would like to check # of tables, but this takes a while to time // out. @@ -84,9 +85,17 @@ public class TestOfflineMetaRebuildOverlap extends OfflineMetaRebuildTestCore { LOG.info("Waiting for no more RIT"); ZKAssign.blockUntilNoRIT(zkw); LOG.info("No more RIT in ZK, now doing final test verification"); + int tries = 60; + while(TEST_UTIL.getHBaseCluster() + .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition().size() > 0 && + tries-- > 0) { + LOG.info("Waiting for RIT: "+TEST_UTIL.getHBaseCluster() + .getMaster().getAssignmentManager().getRegionStates().getRegionsInTransition()); + Thread.sleep(1000); + } // Meta still messed up. 
- assertEquals(0, scanMeta()); + assertEquals(1, scanMeta()); HTableDescriptor[] htbls = TEST_UTIL.getHBaseAdmin().listTables(); LOG.info("Tables present after restart: " + Arrays.toString(htbls)); @@ -94,10 +103,11 @@ public class TestOfflineMetaRebuildOverlap extends OfflineMetaRebuildTestCore { // so the table is still present and this should be 1. assertEquals(1, htbls.length); assertErrors(doFsck(conf, false), - new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + new ERROR_CODE[] { + ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, - ERROR_CODE.NOT_IN_META_OR_DEPLOYED, }); + ERROR_CODE.NOT_IN_META_OR_DEPLOYED}); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTable.java index f6baf84..a65e03e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTable.java @@ -51,7 +51,8 @@ public class TestZKTable { @Test public void testTableStates() throws ZooKeeperConnectionException, IOException, KeeperException { - final String name = "testDisabled"; + final TableName name = + TableName.valueOf("testDisabled"); Abortable abortable = new Abortable() { @Override public void abort(String why, Throwable e) { @@ -65,7 +66,7 @@ public class TestZKTable { }; ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), - name, abortable, true); + name.getNameAsString(), abortable, true); ZKTable zkt = new ZKTable(zkw); assertFalse(zkt.isEnabledTable(name)); assertFalse(zkt.isDisablingTable(name)); diff --git a/hbase-server/src/test/ruby/hbase/admin_test.rb b/hbase-server/src/test/ruby/hbase/admin_test.rb index 9050b58..fd6abca 100644 --- a/hbase-server/src/test/ruby/hbase/admin_test.rb +++ b/hbase-server/src/test/ruby/hbase/admin_test.rb @@ -37,11 +37,11 @@ module Hbase end define_test "exists? should return true when a table exists" do - assert(admin.exists?('.META.')) + assert(admin.exists?('hbase:meta')) end define_test "exists? should return false when a table exists" do - assert(!admin.exists?('.NOT.EXISTS.')) + assert(!admin.exists?('NOT.EXISTS')) end define_test "enabled? 
should return true for enabled tables" do @@ -74,38 +74,38 @@ module Hbase end define_test "list should not return meta tables" do - assert(!admin.list.member?('.META.')) + assert(!admin.list.member?('hbase:meta')) end #------------------------------------------------------------------------------- define_test "flush should work" do - admin.flush('.META.') + admin.flush('hbase:meta') end #------------------------------------------------------------------------------- define_test "compact should work" do - admin.compact('.META.') + admin.compact('hbase:meta') end #------------------------------------------------------------------------------- define_test "major_compact should work" do - admin.major_compact('.META.') + admin.major_compact('hbase:meta') end #------------------------------------------------------------------------------- define_test "split should work" do - admin.split('.META.', nil) + admin.split('hbase:meta', nil) end #------------------------------------------------------------------------------- define_test "drop should fail on non-existent tables" do assert_raise(ArgumentError) do - admin.drop('.NOT.EXISTS.') + admin.drop('NOT.EXISTS') end end @@ -243,7 +243,7 @@ module Hbase define_test "alter should fail with non-existing tables" do assert_raise(ArgumentError) do - admin.alter('.NOT.EXISTS.', true, METHOD => 'delete', NAME => 'y') + admin.alter('NOT.EXISTS', true, METHOD => 'delete', NAME => 'y') end end diff --git a/hbase-server/src/test/ruby/hbase/hbase_test.rb b/hbase-server/src/test/ruby/hbase/hbase_test.rb index c2f5405..185ec3e 100644 --- a/hbase-server/src/test/ruby/hbase/hbase_test.rb +++ b/hbase-server/src/test/ruby/hbase/hbase_test.rb @@ -39,11 +39,11 @@ module Hbase end define_test "Hbase::Hbase#table should create a new table object when called the first time" do - assert_kind_of(::Hbase::Table, @hbase.table('.META.', @formatter)) + assert_kind_of(::Hbase::Table, @hbase.table('hbase:meta', @formatter)) end define_test "Hbase::Hbase#table should create a new table object every call" do - assert_not_same(@hbase.table('.META.', @formatter), @hbase.table('.META.', @formatter)) + assert_not_same(@hbase.table('hbase:meta', @formatter), @hbase.table('hbase:meta', @formatter)) end end end diff --git a/hbase-server/src/test/ruby/hbase/table_test.rb b/hbase-server/src/test/ruby/hbase/table_test.rb index 790332f..8170c53 100644 --- a/hbase-server/src/test/ruby/hbase/table_test.rb +++ b/hbase-server/src/test/ruby/hbase/table_test.rb @@ -37,7 +37,7 @@ module Hbase define_test "Hbase::Table constructor should not fail for existent tables" do assert_nothing_raised do - table('.META.') + table('hbase:meta') end end end @@ -55,7 +55,7 @@ module Hbase end define_test "is_meta_table? method should return true for the meta table" do - assert(table('.META.').is_meta_table?) + assert(table('hbase:meta').is_meta_table?) end define_test "is_meta_table? 
method should return false for a normal table" do @@ -65,7 +65,7 @@ module Hbase #------------------------------------------------------------------------------- define_test "get_all_columns should return columns list" do - cols = table('.META.').get_all_columns + cols = table('hbase:meta').get_all_columns assert_kind_of(Array, cols) assert(cols.length > 0) end @@ -73,19 +73,19 @@ module Hbase #------------------------------------------------------------------------------- define_test "parse_column_name should not return a qualifier for name-only column specifiers" do - col, qual = table('.META.').parse_column_name('foo') + col, qual = table('hbase:meta').parse_column_name('foo') assert_not_nil(col) assert_nil(qual) end define_test "parse_column_name should not return a qualifier for family-only column specifiers" do - col, qual = table('.META.').parse_column_name('foo:') + col, qual = table('hbase:meta').parse_column_name('foo:') assert_not_nil(col) assert_nil(qual) end define_test "parse_column_name should return a qualifier for family:qualifier column specifiers" do - col, qual = table('.META.').parse_column_name('foo:bar') + col, qual = table('hbase:meta').parse_column_name('foo:bar') assert_not_nil(col) assert_not_nil(qual) end diff --git a/hbase-server/src/test/ruby/shell/shell_test.rb b/hbase-server/src/test/ruby/shell/shell_test.rb index 1060074..988d09e 100644 --- a/hbase-server/src/test/ruby/shell/shell_test.rb +++ b/hbase-server/src/test/ruby/shell/shell_test.rb @@ -39,11 +39,11 @@ class ShellTest < Test::Unit::TestCase #------------------------------------------------------------------------------- define_test "Shell::Shell#hbase_table should return a table instance" do - assert_kind_of(Hbase::Table, @shell.hbase_table('.META.')) + assert_kind_of(Hbase::Table, @shell.hbase_table('hbase:meta')) end define_test "Shell::Shell#hbase_table should not cache table instances" do - assert_not_same(@shell.hbase_table('.META.'), @shell.hbase_table('.META.')) + assert_not_same(@shell.hbase_table('hbase:meta'), @shell.hbase_table('hbase:meta')) end #-------------------------------------------------------------------------------
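For readers following the table-name migration end to end, here is a small illustrative sketch (not part of the patch) of the pattern the tests above converge on: table identifiers flow through TableName rather than raw byte[]/String values, the meta table is addressed by its namespaced name "hbase:meta" instead of ".META." (as the shell tests above now expect), and the new TestTableName watcher hands each JUnit test its own TableName. The demo class and method names are invented for illustration; only the TableName and TestTableName APIs shown in the patch, plus standard JUnit @Rule wiring, are assumed.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.TestTableName;
import org.junit.Rule;
import org.junit.Test;

public class TableNameMigrationSketch {
  // TestTableName extends TestWatcher, so JUnit invokes starting() before each test
  // and the rule exposes a per-test TableName instead of a raw byte[] name.
  @Rule
  public TestTableName name = new TestTableName();

  @Test
  public void usesTableNames() {
    TableName table = name.getTableName();          // derived from the running test method
    TableName meta = TableName.META_TABLE_NAME;     // the meta table, addressed as "hbase:meta"
    TableName byString = TableName.valueOf("tableClean");  // factory replaces Bytes.toBytes(...)
    System.out.println(table.getNameAsString() + ", "
        + meta.getNameAsString() + ", " + byString.getNameAsString());
  }
}

When a string form is still needed, for example to build command-line arguments as the fsck quarantine tests do above, it is recovered with getNameAsString() rather than by keeping a parallel String field.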