diff --git security/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java security/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index c1f20de..b636e3e 100644 --- security/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ security/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -673,14 +673,6 @@ public class AccessController extends BaseRegionObserver } @Override - public void preGetClosestRowBefore(final ObserverContext c, - final byte [] row, final byte [] family, final Result result) - throws IOException { - requirePermission(TablePermission.Action.READ, c.getEnvironment(), - (family != null ? Lists.newArrayList(family) : null)); - } - - @Override public void preGet(final ObserverContext c, final Get get, final List result) throws IOException { /* diff --git src/main/java/org/apache/hadoop/hbase/HConstants.java src/main/java/org/apache/hadoop/hbase/HConstants.java index 07041b5..5de107a 100644 --- src/main/java/org/apache/hadoop/hbase/HConstants.java +++ src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -48,7 +48,10 @@ public final class HConstants { /** long constant for zero */ public static final Long ZERO_L = Long.valueOf(0L); + public static final String NINES = "99999999999999"; + + @Deprecated public static final String ZEROES = "00000000000000"; // For migration @@ -62,9 +65,11 @@ public final class HConstants { * Version 5 changes versions in catalog table regions. * Version 6 enables blockcaching on catalog tables. * Version 7 introduces hfile -- hbase 0.19 to 0.20.. + * version 8 changed the metaFormat */ // public static final String FILE_SYSTEM_VERSION = "6"; - public static final String FILE_SYSTEM_VERSION = "7"; + public static final String FILE_SYSTEM_VERSION = "8"; + public static final int FILE_SYSTEM_VERSION_INT = Integer.parseInt(FILE_SYSTEM_VERSION); // Configuration parameters @@ -82,9 +87,6 @@ public final class HConstants { /** Cluster is fully-distributed */ public static final String CLUSTER_IS_DISTRIBUTED = "true"; - /** Default value for cluster distributed mode */ - public static final String DEFAULT_CLUSTER_DISTRIBUTED = CLUSTER_IS_LOCAL; - /** default host address */ public static final String DEFAULT_HOST = "0.0.0.0"; @@ -270,7 +272,7 @@ public final class HConstants { // Always store the location of the root table's HRegion. // This HRegion is never split. - // region name = table + startkey + regionid. This is the row key. + // region name = table + endkey + regionid. This is the row key. // each row in the root and meta tables describes exactly 1 region // Do we ever need to know all the information that we are storing? @@ -293,9 +295,6 @@ public final class HConstants { /** The META table's name. */ public static final byte [] META_TABLE_NAME = Bytes.toBytes(".META."); - /** delimiter used between portions of a region name */ - public static final int META_ROW_DELIMITER = ','; - /** The catalog family as a string*/ public static final String CATALOG_FAMILY_STR = "info"; @@ -336,6 +335,8 @@ public final class HConstants { * meta is up-to-date. 
*/ public static final short META_VERSION = 0; + public static final short META_VERSION2 = 1; + // Other constants diff --git src/main/java/org/apache/hadoop/hbase/HRegionInfo.java src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index 8d83ff3..561460c 100644 --- src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -23,6 +23,7 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.EOFException; import java.io.IOException; +import java.nio.ByteBuffer; import java.util.Arrays; import org.apache.commons.logging.Log; @@ -33,7 +34,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.KeyValue.KVComparator; + + import org.apache.hadoop.hbase.migration.HRegionInfo090x; +import org.apache.hadoop.hbase.migration.HRegionInfo090x2; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.JenkinsHash; @@ -52,7 +56,8 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable { // VERSION == 0 when HRegionInfo had an HTableDescriptor inside it. public static final byte VERSION_PRE_092 = 0; - public static final byte VERSION = 1; + public static final byte VERSION = 2; + private static final Log LOG = LogFactory.getLog(HRegionInfo.class); /** @@ -61,13 +66,15 @@ implements WritableComparable { * in the filesystem. * * New region name format: - * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. + * <tablename><is_end_of_table_marker>,,<endkey>,<regionIdTimestamp>.<encodedName>. * where, + * <end_of_table_marker> HRegionInfo.NOT_END_OF_TABLE + * || HRegionInfo.END_OF_TABLE * <encodedName> is a hex version of the MD5 hash of - * <tablename>,<startkey>,<regionIdTimestamp> + * <tablename>,&<startkey>,<regionIdTimestamp> * * The old region name format: - * <tablename>,<startkey>,<regionIdTimestamp> + * <tablename>,<is_end_of_table_marker><endkey>,<regionIdTimestamp> * For region names in the old format, the encoded name is a 32-bit * JenkinsHash integer value (in its decimal notation, string form). *

@@ -83,6 +90,14 @@ implements WritableComparable { */ private static final int ENC_SEPARATOR = '.'; public static final int MD5_HEX_LENGTH = 32; + // It should say, the tablename encoded in the region ends with !, + // but the last region's tablename ends with " + public static final int NOT_END_OF_TABLE = 33; // The ascii for ! + + // This must come after NOT_END_OF_TABLE to work + public static final int END_OF_TABLE = NOT_END_OF_TABLE + 1; + public static final String ROOT_REGION_ENCODING = "1756551925"; + public static final String META_REGION_ENCODING = "845716343"; /** * Does region name contain its encoded name? @@ -108,7 +123,7 @@ implements WritableComparable { String encodedName; if (hasEncodedName(regionName)) { // region is in new format: - // ,,/encodedName/ + // ,,/encodedName/ encodedName = Bytes.toString(regionName, regionName.length - MD5_HEX_LENGTH - 1, MD5_HEX_LENGTH); @@ -125,15 +140,19 @@ implements WritableComparable { /** * Use logging. * @param encodedRegionName The encoded regionname. - * @return -ROOT- if passed 70236052 or - * .META. if passed 1028785192 else returns + * @return -ROOT- if passed + * HRegionInfo.ROOT_REGION_ENCODING or + * .META. if passed + * HRegionInfo.META_REGION_ENCODING else returns * encodedRegionName */ public static String prettyPrint(final String encodedRegionName) { - if (encodedRegionName.equals("70236052")) { + if (encodedRegionName.equals(ROOT_REGION_ENCODING)) { return encodedRegionName + "/-ROOT-"; - } else if (encodedRegionName.equals("1028785192")) { - return encodedRegionName + "/.META."; + } else { + if (encodedRegionName.equals(META_REGION_ENCODING)) { + return encodedRegionName + "/.META."; + } } return encodedRegionName; } @@ -143,11 +162,12 @@ implements WritableComparable { /** HRegionInfo for root region */ public static final HRegionInfo ROOT_REGIONINFO = - new HRegionInfo(0L, Bytes.toBytes("-ROOT-")); + new HRegionInfo(0L, HConstants.ROOT_TABLE_NAME); /** HRegionInfo for first meta region */ - public static final HRegionInfo FIRST_META_REGIONINFO = - new HRegionInfo(1L, Bytes.toBytes(".META.")); + public static final HRegionInfo + FIRST_META_REGIONINFO = + new HRegionInfo(1L, HConstants.META_TABLE_NAME); private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY; // This flag is in the parent of a split while the parent is still referenced @@ -217,6 +237,32 @@ implements WritableComparable { this.tableName = other.getTableDesc().getName(); } + + /** + * Used only for migration + * @param other HRegionInfoForMigration + */ + public HRegionInfo(HRegionInfo090x2 other) { + super(); + this.endKey = other.getEndKey(); + this.offLine = other.isOffline(); + this.regionId = other.getRegionId(); + this.split = other.isSplit(); + this.startKey = other.getStartKey(); + this.hashCode = other.hashCode(); + this.encodedName = other.getEncodedName(); + this.tableName = other.getTableName(); + if (Arrays.equals(this.tableName, HConstants.META_TABLE_NAME) || + Arrays.equals(this.tableName, HConstants.ROOT_TABLE_NAME)) { + this.regionName = createRegionName(tableName, startKey, endKey, regionId, false); + } else { + this.regionName = createRegionName(tableName, startKey, endKey, regionId, true); + } + this.regionNameStr = Bytes.toStringBinary(this.regionName); + + } + + public HRegionInfo(final byte[] tableName) { this(tableName, null, null); } @@ -276,7 +322,11 @@ implements WritableComparable { this.offLine = false; this.regionId = regionid; - this.regionName = createRegionName(this.tableName, startKey, regionId, true); + this.regionName = 
createRegionName(this.tableName, + startKey, + endKey, + regionId, + true); this.regionNameStr = Bytes.toStringBinary(this.regionName); this.split = split; @@ -287,7 +337,19 @@ implements WritableComparable { setHashCode(); } - /** + private byte[] createRegionName(byte[] tableName, + byte[] startKey, + byte[] endKey, + long regionId, + boolean newFormat) { + return createRegionName(tableName, + startKey, + endKey, + Long.toString(regionId), + newFormat); + } + + /** * Costruct a copy of another HRegionInfo * * @param other @@ -307,33 +369,28 @@ implements WritableComparable { } - /** - * Make a region name of passed parameters. - * @param tableName - * @param startKey Can be null - * @param regionid Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). - * @return Region name made of passed tableName, startKey and id - */ - public static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final long regionid, boolean newFormat) { - return createRegionName(tableName, startKey, Long.toString(regionid), newFormat); - } - - /** - * Make a region name of passed parameters. - * @param tableName - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). - * @return Region name made of passed tableName, startKey and id - */ - public static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final String id, boolean newFormat) { - return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat); - } + /** + * Make a region name of passed parameters. + * + * + * @param tableName + * @param endKey Can be null + * @param regionid Region id (Usually timestamp from when region was + * created). + * @param newFormat should we create the region name in the new format + * (such that it contains its encoded name?). + * @return Region name made of passed tableName, endKey and id + */ + public static byte[] createRegionName(final byte[] tableName, + final byte[] endKey, + final long regionid, + boolean newFormat) { + return createRegionName(tableName, + null, + endKey, + Long.toString(regionid), + newFormat); + } /** * Make a region name of passed parameters. @@ -344,48 +401,114 @@ implements WritableComparable { * (such that it contains its encoded name?). * @return Region name made of passed tableName, startKey and id */ - public static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final byte [] id, boolean newFormat) { - byte [] b = new byte [tableName.length + 2 + id.length + - (startKey == null? 0: startKey.length) + - (newFormat ? (MD5_HEX_LENGTH + 2) : 0)]; - - int offset = tableName.length; - System.arraycopy(tableName, 0, b, 0, offset); - b[offset++] = DELIMITER; - if (startKey != null && startKey.length > 0) { - System.arraycopy(startKey, 0, b, offset, startKey.length); - offset += startKey.length; + public static byte [] createRegionName(final byte[] tableName, + final byte[] startKey, + final byte[] endKey, + final String id, + boolean newFormat) { + return createRegionName(tableName, + startKey, + endKey, + Bytes.toBytes(id), + newFormat); + } + /** + * Make a region name of passed parameters. 
+ * + * @param tableName + * @param startKey Can be null + * @param endKey Can be null + * @param id Region id (Usually timestamp from when region was + * created). + * @param newFormat should we create the region name in the new format + * (such that it contains its encoded name?). + * @return Region name made of passed tableName, endKey and id + */ + + public static byte[] createRegionName(final byte[] tableName, + final byte[] startKey, + final byte[] endKey, + final byte[] id, + boolean newFormat) { + // Allocate room for the tablename along with the end of table + // marker and the delimiter. + int allocation = tableName.length + 2; + + // If the endKey is null just allocate space for the delimiter, else + // allocate enough for the key and a delimiter + allocation += endKey == null ? 1 : endKey.length + 1; + allocation += id == null ? 0 : id.length; + + ByteBuffer byteArrayDataOutput = ByteBuffer.allocate(allocation); + byteArrayDataOutput.put(tableName); + + if (endKey == null || endKey.length <= 0) { + byteArrayDataOutput.put((byte) END_OF_TABLE); + byteArrayDataOutput.put((byte)DELIMITER); + } else { + byteArrayDataOutput.put((byte) NOT_END_OF_TABLE); + byteArrayDataOutput.put((byte)DELIMITER); + byteArrayDataOutput.put(endKey); + } + byteArrayDataOutput.put((byte)DELIMITER); + + if (id != null && id.length > 0) { + byteArrayDataOutput.put(id); + } + + // Add the encoding bit to the regionname if it is a new style region. + if (newFormat) { + return addEncoding(byteArrayDataOutput.array(), + tableName, + startKey, + id); + } else { + // Old style regions have no encoding so just return. + return byteArrayDataOutput.array(); + } } - b[offset++] = DELIMITER; - System.arraycopy(id, 0, b, offset, id.length); - offset += id.length; - - if (newFormat) { - // - // Encoded name should be built into the region name. - // - // Use the region name thus far (namely, ,,) - // to compute a MD5 hash to be used as the encoded name, and append - // it to the byte buffer. - // - String md5Hash = MD5Hash.getMD5AsHex(b, 0, offset); - byte [] md5HashBytes = Bytes.toBytes(md5Hash); - - if (md5HashBytes.length != MD5_HEX_LENGTH) { - LOG.error("MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH + - "; Got=" + md5HashBytes.length); - } - // now append the bytes '..' to the end - b[offset++] = ENC_SEPARATOR; - System.arraycopy(md5HashBytes, 0, b, offset, MD5_HEX_LENGTH); - offset += MD5_HEX_LENGTH; - b[offset++] = ENC_SEPARATOR; + /** + * Encoded name should be built into the region name. + * As not to change encodings between the pre-HBASE-2600 + * versions and the post versions + * We want to use the startkey as opposed to the endkey + * here so as not to change encodings. + * @param regionName The regionName without an encoding + * @param tableName The table name. + * @param startKey The startKey. + * @param id The region id + * @return The region with the encoding glued on. 
+ */ + private static byte[] addEncoding(final byte[] regionName, + final byte[] tableName, + final byte[] startKey, + final byte[] id) { + + + byte [] oldRegionKey = Bytes.add(tableName, new byte[] {DELIMITER}); + if (startKey != null) { + oldRegionKey = Bytes.add(oldRegionKey, startKey, new byte[] {DELIMITER}); + } else { + oldRegionKey = Bytes.add(oldRegionKey, + HConstants.EMPTY_BYTE_ARRAY, + new byte[] {DELIMITER}); + } + oldRegionKey = Bytes.add(oldRegionKey, id); + + final byte[] md5HashBytes = MD5Hash.getMD5AsHex(oldRegionKey).getBytes(); + + if (md5HashBytes.length != MD5_HEX_LENGTH) { + LOG.error("MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH + + "; Got=" + md5HashBytes.length); + } + + byte[] encoding = Bytes.add(new byte[]{ENC_SEPARATOR}, + md5HashBytes, + new byte[]{ENC_SEPARATOR}); + return Bytes.add(regionName, encoding); } - - return b; - } + /** * Gets the table name from the specified region name. @@ -396,7 +519,8 @@ implements WritableComparable { int offset = -1; for (int i = 0; i < regionName.length; i++) { if (regionName[i] == DELIMITER) { - offset = i; + // We are off by one because we need to remove the end of region marker + offset = i - 1; break; } } @@ -405,47 +529,49 @@ implements WritableComparable { return tableName; } - /** - * Separate elements of a regionName. - * @param regionName - * @return Array of byte[] containing tableName, startKey and id - * @throws IOException - */ - public static byte [][] parseRegionName(final byte [] regionName) - throws IOException { - int offset = -1; - for (int i = 0; i < regionName.length; i++) { - if (regionName[i] == DELIMITER) { - offset = i; - break; - } - } - if(offset == -1) throw new IOException("Invalid regionName format"); - byte [] tableName = new byte[offset]; - System.arraycopy(regionName, 0, tableName, 0, offset); - offset = -1; - for (int i = regionName.length - 1; i > 0; i--) { - if(regionName[i] == DELIMITER) { - offset = i; - break; - } - } - if(offset == -1) throw new IOException("Invalid regionName format"); - byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; - if(offset != tableName.length + 1) { - startKey = new byte[offset - tableName.length - 1]; - System.arraycopy(regionName, tableName.length + 1, startKey, 0, - offset - tableName.length - 1); + /** + * Separate elements of a regionName. 
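// ---------------------------------------------------------------------------
// Editorial sketch, not part of this patch: what a new-format region name
// built by createRegionName()/addEncoding() above looks like. Table and key
// values are hypothetical and used only for illustration.
//
//   <tablename><marker>,<endkey>,<regionid>[.<md5 hex>.]
//
// where marker is '!' (NOT_END_OF_TABLE) for every region except the last and
// '"' (END_OF_TABLE) for the last region of a table, and the trailing
// 32-character MD5 encoding is still computed from the old-style key
// <tablename>,<startkey>,<regionid>, so existing encoded names (and the region
// directories named after them) do not change.
static byte[] exampleNewFormatRegionName() {
  return HRegionInfo.createRegionName(
      Bytes.toBytes("t"),              // table name
      Bytes.toBytes("row-050"),        // start key: only feeds the MD5 encoding
      Bytes.toBytes("row-100"),        // end key: embedded in the row key
      Bytes.toBytes("1299161123000"),  // region id (usually a creation timestamp)
      true);                           // new format: append ".<md5 hex>."
  // => roughly "t!,row-100,1299161123000.<32 hex chars>."
}
// ---------------------------------------------------------------------------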
+ * + * @param regionName + * @return Array of byte[] containing the tablename, endKey and id + * @throws IOException + */ + public static byte[][] parseRegionName(final byte[] regionName) + throws IOException { + int offset = -1; + + for (int i = 0; i < regionName.length; i++) { + if (regionName[i] == DELIMITER) { + offset = i - 1; + break; + } + } + if (offset == -1) throw new IOException("Invalid regionName format"); + byte[] tableName = new byte[offset]; + System.arraycopy(regionName, 0, tableName, 0, offset); + offset = -1; + for (int i = regionName.length - 1; i > 0; i--) { + if (regionName[i] == DELIMITER) { + offset = i; + break; + } + } + if (offset == -1) throw new IOException("Invalid regionName format"); + byte[] endKey = HConstants.EMPTY_BYTE_ARRAY; + if (offset != tableName.length + 1) { + endKey = new byte[offset - tableName.length - 1]; + System.arraycopy(regionName, tableName.length + 1, endKey, 0, + offset - tableName.length - 1); + } + byte[] id = new byte[regionName.length - offset - 1]; + System.arraycopy(regionName, offset + 1, id, 0, + regionName.length - offset - 1); + byte[][] elements = new byte[3][]; + elements[0] = tableName; + elements[1] = endKey; + elements[2] = id; + return elements; } - byte [] id = new byte[regionName.length - offset - 1]; - System.arraycopy(regionName, offset + 1, id, 0, - regionName.length - offset - 1); - byte [][] elements = new byte[3][]; - elements[0] = tableName; - elements[1] = startKey; - elements[2] = id; - return elements; - } /** @return the regionId */ public long getRegionId(){ @@ -723,36 +849,58 @@ implements WritableComparable { // because freaks out if its not the current classes' version. This method // can deserialize version 0 and version 1 of HRI. byte version = in.readByte(); - if (version == 0) { - // This is the old HRI that carried an HTD. Migrate it. The below - // was copied from the old 0.90 HRI readFields. 
- this.endKey = Bytes.readByteArray(in); - this.offLine = in.readBoolean(); - this.regionId = in.readLong(); - this.regionName = Bytes.readByteArray(in); - this.regionNameStr = Bytes.toStringBinary(this.regionName); - this.split = in.readBoolean(); - this.startKey = Bytes.readByteArray(in); - try { - HTableDescriptor htd = new HTableDescriptor(); - htd.readFields(in); - this.tableName = htd.getName(); - } catch(EOFException eofe) { - throw new IOException("HTD not found in input buffer", eofe); - } - this.hashCode = in.readInt(); - } else if (getVersion() == version) { - this.endKey = Bytes.readByteArray(in); - this.offLine = in.readBoolean(); - this.regionId = in.readLong(); - this.regionName = Bytes.readByteArray(in); - this.regionNameStr = Bytes.toStringBinary(this.regionName); - this.split = in.readBoolean(); - this.startKey = Bytes.readByteArray(in); - this.tableName = Bytes.readByteArray(in); - this.hashCode = in.readInt(); - } else { - throw new IOException("Non-migratable/unknown version=" + getVersion()); + switch (version) { + case HRegionInfo.VERSION: + this.endKey = Bytes.readByteArray(in); + this.offLine = in.readBoolean(); + this.regionId = in.readLong(); + this.regionName = Bytes.readByteArray(in); + this.split = in.readBoolean(); + this.startKey = Bytes.readByteArray(in); + this.tableName = Bytes.readByteArray(in); + this.hashCode = in.readInt(); + this.regionNameStr = Bytes.toStringBinary(this.regionName); + break; + case HRegionInfo090x2.VERSION: + this.endKey = Bytes.readByteArray(in); + this.offLine = in.readBoolean(); + this.regionId = in.readLong(); + this.regionName = Bytes.readByteArray(in); + this.split = in.readBoolean(); + this.startKey = Bytes.readByteArray(in); + this.tableName = Bytes.readByteArray(in); + this.hashCode = in.readInt(); + if (Arrays.equals(this.tableName, HConstants.META_TABLE_NAME) || + Arrays.equals(this.tableName, HConstants.ROOT_TABLE_NAME)) { + this.regionName = createRegionName(tableName, startKey, endKey, regionId, false); + } else { + this.regionName = createRegionName(tableName, startKey, endKey, regionId, true); + } + this.regionNameStr = Bytes.toStringBinary(this.regionName); + break; + case VERSION_PRE_092: + // This is the old HRI that carried an HTD. Migrate it. The below + // was copied from the old 0.90 HRI readFields. + this.endKey = Bytes.readByteArray(in); + this.offLine = in.readBoolean(); + this.regionId = in.readLong(); + this.regionName = Bytes.readByteArray(in); + this.split = in.readBoolean(); + this.startKey = Bytes.readByteArray(in); + try { + HTableDescriptor htd = new HTableDescriptor(); + htd.readFields(in); + this.tableName = htd.getName(); + } catch (EOFException eofe) { + throw new IOException("HTD not found in input buffer", eofe); + } + this.hashCode = in.readInt(); + this.regionNameStr = Bytes.toStringBinary(this.regionName); + break; + default: + String errMsg = "Non-migratable/unknown version=" + version; + errMsg += " .It should be= " + getVersion(); + throw new IOException(errMsg); } } diff --git src/main/java/org/apache/hadoop/hbase/HServerLoad.java src/main/java/org/apache/hadoop/hbase/HServerLoad.java index fcf529f..5d703b9 100644 --- src/main/java/org/apache/hadoop/hbase/HServerLoad.java +++ src/main/java/org/apache/hadoop/hbase/HServerLoad.java @@ -274,30 +274,8 @@ implements WritableComparable { public long getWriteRequestsCount() { return writeRequestsCount; } - - /** - * @return The current total size of root-level indexes for the region, in KB. 
- */ - public int getRootIndexSizeKB() { - return rootIndexSizeKB; - } - - /** - * @return The total size of all index blocks, not just the root level, in KB. - */ - public int getTotalStaticIndexSizeKB() { - return totalStaticIndexSizeKB; - } /** - * @return The total size of all Bloom filter blocks, not just loaded into the - * block cache, in KB. - */ - public int getTotalStaticBloomSizeKB() { - return totalStaticBloomSizeKB; - } - - /** * @return the total number of kvs in current compaction */ public long getTotalCompactingKVs() { diff --git src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index fc5e53e..e544b5b 100644 --- src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -35,8 +35,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.io.hfile.Compression; -import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.WritableComparable; @@ -262,13 +260,65 @@ public class HTableDescriptor implements WritableComparable { } } - /* - * Set meta flags on this table. - * IS_ROOT_KEY is set if its a -ROOT- table - * IS_META_KEY is set either if its a -ROOT- or a .META. table - * Called by constructors. - * @param name - */ + /** + * This gives you a stop row for scanning for a particular region in meta. + * It's important that we don't scan past the end of the regions for a + * particular table. + * + * @param tableName The tablename in meta in which we want + * to provide a stopRow for scanning for. + * @return The stopRow to prevent scanning past the last region in meta for + * a table. + */ + + public static byte[] getStopRow(final byte[] tableName) { + final int allocation = tableName.length + 3; + byte[] b = new byte[allocation]; + int offset = tableName.length; + System.arraycopy(tableName, 0, b, 0, offset); + + b[offset++] = (byte) (HRegionInfo.END_OF_TABLE + 1); + b[offset++] = HRegionInfo.DELIMITER; + b[offset++] = HRegionInfo.DELIMITER; + return b; + } + + /** + * Get the first possible region that could match a particular + * tablename and searchrow. + * + * @param tableName The tableName in which we are searching for the matching + * region. + * @param searchRow The row in which we are searching for the matching region + * @return The first possible matching region. + */ + public static byte[] getStartRow(final byte[] tableName, + final byte[] searchRow) { + + if (searchRow == null || searchRow.length == 0) { + final int allocation = tableName.length + 3; + byte[] startRow = new byte[allocation]; + System.arraycopy(tableName, 0, startRow, 0, tableName.length); + startRow[tableName.length] = HRegionInfo.NOT_END_OF_TABLE - 1; + startRow[tableName.length + 1] = HRegionInfo.DELIMITER; + startRow[tableName.length + 2] = HRegionInfo.DELIMITER; + return startRow; + } + + return HRegionInfo.createRegionName(tableName, + null, + searchRow, + HConstants.NINES, + false); + } + + /* + * Set meta flags on this table. + * IS_ROOT_KEY is set if its a -ROOT- table + * IS_META_KEY is set either if its a -ROOT- or a .META. table + * Called by constructors. 
+ * @param name + */ private void setMetaFlags(final byte [] name) { setRootRegion(Bytes.equals(name, HConstants.ROOT_TABLE_NAME)); setMetaRegion(isRootRegion() || diff --git src/main/java/org/apache/hadoop/hbase/KeyValue.java src/main/java/org/apache/hadoop/hbase/KeyValue.java index 243d76f..1535de5 100644 --- src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -1916,10 +1916,10 @@ public class KeyValue implements Writable, HeapSize { public static class RootKeyComparator extends MetaKeyComparator { public int compareRows(byte [] left, int loffset, int llength, byte [] right, int roffset, int rlength) { - // Rows look like this: .META.,ROW_FROM_META,RID - // LOG.info("ROOT " + Bytes.toString(left, loffset, llength) + + // rows look like this: .META.!,ROW_FROM_META,RID + // LOG.info("ROOT " + Bytes.toString(left, loffset, llength) + // "---" + Bytes.toString(right, roffset, rlength)); - final int metalength = 7; // '.META.' length + final int metalength = 8; // '.META.' length int lmetaOffsetPlusDelimiter = loffset + metalength; int leftFarDelimiter = getDelimiterInReverse(left, lmetaOffsetPlusDelimiter, diff --git src/main/java/org/apache/hadoop/hbase/catalog/MetaMigratev2.java src/main/java/org/apache/hadoop/hbase/catalog/MetaMigratev2.java new file mode 100644 index 0000000..409671c --- /dev/null +++ src/main/java/org/apache/hadoop/hbase/catalog/MetaMigratev2.java @@ -0,0 +1,324 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.catalog; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.catalog.MetaReader.Visitor; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.migration.HRegionInfo090x2; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.Writables; +import org.apache.zookeeper.KeeperException; + +import java.io.IOException; +import java.util.*; + +public class MetaMigratev2 { + + private static final Log LOG = LogFactory.getLog(MetaMigratev2.class); + private MasterFileSystem masterFileSystem; + private FileSystem fs; + private MasterServices masterServices; + + /** + * A class to check and update the version of Root and Meta + * + * @param hMaster The HMaster's master service + */ + public MetaMigratev2(MasterServices hMaster) { + masterServices = hMaster; + masterFileSystem = masterServices.getMasterFileSystem(); + fs = masterFileSystem.getFileSystem(); + } + + /** + * Meta visitor that migrates the info:regioninfo as it visits. + */ + class MigratingMetaVisitor implements Visitor { + private final MasterServices services; + final List htds = new ArrayList(); + + MigratingMetaVisitor(final MasterServices services) { + this.services = services; + } + + @Override + public boolean visit(Result r) throws IOException { + if (r == null || r.isEmpty()) return true; + // Check info:regioninfo, info:splitA, and info:splitB. Make sure all + // have migrated HRegionInfos... that there are no leftover 090 version + // HRegionInfos. + byte[] hriBytes = getBytes(r, HConstants.REGIONINFO_QUALIFIER); + // Presumes that an edit updating all three cells either succeeds or + // doesn't -- that we don't have case of info:regioninfo migrated but not + // info:splitA. + if (isMigrated(hriBytes)) return true; + // OK. Need to migrate this row in meta. + HRegionInfo090x2 hri090 = getHRegionInfo090x2(hriBytes); + Delete d = new Delete(hri090.getRegionName()); + + // This will 'migrate' the hregioninfo from 090 version to 092. + HRegionInfo hri = new HRegionInfo(hri090); + LOG.error("Migrating from hri090:" + hri090 + "=> to hri:" + hri); + + // Now make a put to write back to meta. + Put p = new Put(hri.getRegionName()); + p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, + Writables.getBytes(hri)); + + // Now check info:splitA and info:splitB if present. Migrate these too. + checkSplit(r, p, HConstants.SPLITA_QUALIFIER); + checkSplit(r, p, HConstants.SPLITB_QUALIFIER); + + MetaEditor.putToMetaTable(this.services.getCatalogTracker(), p); + MetaEditor.deleteMetaTable(this.services.getCatalogTracker(), d); + + htds.add(hri); + return true; + } + } + + void checkSplit(final Result r, final Put p, final byte[] which) + throws IOException { + //XXX do I need to remove splits, we need a test for this + byte[] hriSplitBytes = getBytes(r, which); + if (!isMigrated(hriSplitBytes)) { + // This will convert the HRI from 092v1 to 092v2 HRI. + // If we can't migrate the children throw an IOException. 
+ HRegionInfo090x2 hri902 = Writables.getHRegionInfo90x2(hriSplitBytes); + // This will 'migrate' the hregioninfo from 090x2 version to 092. + HRegionInfo hri = new HRegionInfo(hri902); + + p.add(HConstants.CATALOG_FAMILY, which, Writables.getBytes(hri)); + } + } + + /** + * Get the version of roottable + * + * @return the version of the root table + * @throws IOException if we can't recover the hbase version. + */ + public int getVersion() throws IOException { + Path rootDir = masterFileSystem.getRootDir(); + return Integer.parseInt(FSUtils.getVersion(fs, rootDir)); + } + + /** + * Update legacy META rows + * + * @return List of table descriptors. + * @throws IOException In case we can't metaScan + */ + List updateMeta() throws IOException { + MigratingMetaVisitor v = new MigratingMetaVisitor(masterServices); + MetaReader.fullScan(masterServices.getCatalogTracker(), v); + + AssignmentManager assignmentManager = masterServices.getAssignmentManager(); + assignmentManager.setRegionsToReopen(v.htds); + return v.htds; + } + + boolean isRootOnline() throws IOException, InterruptedException { + Configuration configuration = masterServices.getConfiguration(); + int defaultTimeout = 1000; + long timeout = configuration.getLong("hbase.catalog.verification.timeout", + defaultTimeout); + CatalogTracker catalogTracker = masterServices.getCatalogTracker(); + return catalogTracker.verifyRootRegionLocation(timeout); + } + + boolean isMetaOnline() throws IOException, InterruptedException { + Configuration configuration = masterServices.getConfiguration(); + int defaultTimeout = 1000; + long timeout = configuration.getLong("hbase.catalog.verification.timeout", + defaultTimeout); + CatalogTracker catalogTracker = masterServices.getCatalogTracker(); + return catalogTracker.verifyMetaRegionLocation(timeout); + } + + void onlineRoot() throws KeeperException, InterruptedException, IOException { + if (isRootOnline()) { + LOG.error("Root is already online"); + return; + } + AssignmentManager assignmentManager = masterServices.getAssignmentManager(); + + Path metaName = new Path(masterFileSystem.getRootDir(), + new Path(Bytes.toString(HConstants.ROOT_TABLE_NAME))); + + Path oldRootRegion = new Path(metaName, "70236052"); + Path newRootRegion = new Path(metaName, HRegionInfo.ROOT_REGION_ENCODING); + + // We are recreating the new region + if (fs.exists(newRootRegion)) { + fs.delete(newRootRegion, true); + } + + //We can't support split meta regions + if (fs.exists(oldRootRegion)) { + fs.delete(oldRootRegion , true); + } + + assignmentManager.assignRoot(); + + //This guarantees that the transition has completed + assignmentManager.waitForAssignment(HRegionInfo.ROOT_REGIONINFO); + } + + + /** + * Update the ROOT with new HRI. (HRI with no HTD) + * + * @return List of table descriptors + * @throws IOException In case we can't metaScan + */ + List updateRoot() throws IOException { + HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO; + HTable rootTable = new HTable(masterServices.getConfiguration(), + HConstants.ROOT_TABLE_NAME); + Result result = rootTable.get(new Get(hri.getRegionName())); + ArrayList retVal = new ArrayList(); + + // Make sure the preexisting root doesn't exist and we aren't overriding. + if (result.isEmpty()) { + // Now make a put to write back to meta. 
+ Put p = new Put(hri.getRegionName()); + p.add(HConstants.CATALOG_FAMILY, + HConstants.REGIONINFO_QUALIFIER, + Writables.getBytes(hri)); + MetaEditor.putToRootTable(masterServices.getCatalogTracker(), p); + retVal.add(hri); + } + return retVal; + } + + + /** + * @param r Result to dig in. + * @param qualifier Qualifier to look at in the passed r. + * @return Bytes for an HRegionInfo or null if no bytes or empty bytes found. + */ + byte[] getBytes(final Result r, final byte[] qualifier) { + byte[] hriBytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier); + if (hriBytes == null || hriBytes.length <= 0) return null; + return hriBytes; + } + + boolean isMigrated(final byte[] hriBytes) { + if (hriBytes == null || hriBytes.length <= 0) return true; + // Else, what version this HRegionInfo instance is at. The first byte + // is the version byte in a serialized HRegionInfo. If its same as our + // current HRI, then nothing to do. + + byte version = hriBytes[0]; + if (version == HRegionInfo.VERSION) { + return true; + } else if (version == HRegionInfo090x2.VERSION || version == 0 ) { + return false; + } + // Unknown version. Return true that its 'migrated' but log warning. + // Should 'never' happen. + assert false : "Unexpected version; bytes=" + Bytes.toStringBinary(hriBytes); + return true; + } + + /** + * Update and Online the new root region with information from the old one. + * + * @return Migrated htds + * @throws IOException Changes to the filesystem can trigger. + * @throws InterruptedException Changes to the zookeeper can trigger. + * @throws KeeperException Changes to the zookeeper can trigger. + */ + public List updateAndOnlineRoot() + throws IOException, InterruptedException, KeeperException { + + onlineRoot(); + return updateRoot(); + } + + /** + * Update and Online the new meta region with information from the old one. + * Also if we complete here we can set the hbase file system version. + * + * @return Migrated htds + * @throws IOException Changes to the filesystem can trigger. + * @throws InterruptedException Changes to the zookeeper can trigger. + * @throws KeeperException Changes to the zookeeper can trigger. + */ + public List updateAndOnlineMeta() + throws KeeperException, IOException, InterruptedException { + onlineMeta(); + List htds = updateMeta(); + FSUtils.setVersion(fs, + masterFileSystem.getRootDir(), + HConstants.FILE_SYSTEM_VERSION_INT, + 10); + + return htds; + } + + private void onlineMeta() throws KeeperException, IOException, InterruptedException { + + if (isMetaOnline()) { + LOG.error("Meta is already online"); + return; + } + + Path metaName = new Path(masterFileSystem.getRootDir(), + new Path(Bytes.toString(HConstants.META_TABLE_NAME))); + Path oldMetaRegion = new Path(metaName, "1028785192"); + Path newMetaRegion = new Path(metaName, HRegionInfo.META_REGION_ENCODING); + + if (fs.exists(newMetaRegion)) { + fs.delete(newMetaRegion, true); + } + + fs.rename(oldMetaRegion, newMetaRegion); + this.masterServices.getAssignmentManager().assignMeta(); + } + + /** + * Get HRegionInfoForMigration serialized from bytes. 
+ * + * @param bytes serialized bytes + * @return An instance of a 090 HRI or null if we failed deserialize + */ + HRegionInfo090x2 getHRegionInfo090x2(final byte[] bytes) { + if (bytes == null || bytes.length == 0) return null; + HRegionInfo090x2 hri = null; + try { + hri = (HRegionInfo090x2) Writables.getWritable(bytes, new HRegionInfo090x2()); + } catch (IOException ioe) { + LOG.warn("Failed deserialize as a 090 HRegionInfo); bytes=" + + Bytes.toStringBinary(bytes), ioe); + } + return hri; + } +} diff --git src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java index 0129ee9..fff74dd 100644 --- src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java +++ src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java @@ -447,7 +447,9 @@ public class MetaReader { this.results.add(this.current); } }; - fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableNameBytes)); + byte[] startRow = HTableDescriptor.getStartRow(tableName.getBytes(), + HConstants.EMPTY_BYTE_ARRAY); + fullScan(catalogTracker, visitor, startRow); // If visitor has results >= 1 then table exists. return visitor.getResults().size() >= 1; } @@ -506,18 +508,7 @@ public class MetaReader { return Bytes.equals(tableName, current.getTableName()); } - /** - * @param tableName - * @return Place to start Scan in .META. when passed a - * tableName; returns <tableName&rt; <,&rt; <,&rt; - */ - static byte [] getTableStartRowForMeta(final byte [] tableName) { - byte [] startRow = new byte[tableName.length + 2]; - System.arraycopy(tableName, 0, startRow, 0, tableName.length); - startRow[startRow.length - 2] = HRegionInfo.DELIMITER; - startRow[startRow.length - 1] = HRegionInfo.DELIMITER; - return startRow; - } + /** * This method creates a Scan object that will only scan catalog rows that @@ -531,12 +522,12 @@ public class MetaReader { public static Scan getScanForTableName(byte[] tableName) { String strName = Bytes.toString(tableName); // Start key is just the table name with delimiters - byte[] startKey = Bytes.toBytes(strName + ",,"); + byte[] startKey = HTableDescriptor.getStartRow(tableName, + HConstants.EMPTY_BYTE_ARRAY); // Stop key appends the smallest possible char to the table name - byte[] stopKey = Bytes.toBytes(strName + " ,,"); + byte[] stopKey = HTableDescriptor.getStopRow(tableName); - Scan scan = new Scan(startKey); - scan.setStopRow(stopKey); + Scan scan = new Scan(startKey, stopKey); return scan; } @@ -601,8 +592,10 @@ public class MetaReader { this.results.add(this.current); } }; - fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableName), - Bytes.equals(tableName, HConstants.META_TABLE_NAME)); + byte[] startRow = HTableDescriptor.getStartRow(tableName, + HConstants.EMPTY_BYTE_ARRAY); + boolean isMeta = Bytes.equals(tableName, HConstants.META_TABLE_NAME); + fullScan(catalogTracker, visitor, startRow, isMeta); return visitor.getResults(); } diff --git src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 16e4017..995e261 100644 --- src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -1588,8 +1588,8 @@ public class HBaseAdmin implements Abortable, Closeable { private HRegionLocation getFirstMetaServerForTable(final byte [] tableName) throws IOException { - return connection.locateRegion(HConstants.META_TABLE_NAME, - HRegionInfo.createRegionName(tableName, null, HConstants.NINES, 
false)); + byte[] metaRowKey = HTableDescriptor.getStartRow(tableName, null); + return connection.locateRegion(HConstants.META_TABLE_NAME, metaRowKey); } /** diff --git src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index b2a5463..d1df47a 100644 --- src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -937,8 +937,12 @@ public class HConnectionManager { zkw.close(); } } else if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { - return locateRegionInMeta(HConstants.ROOT_TABLE_NAME, tableName, row, - useCache, metaRegionLock); + //HARD CODED TO POINT TO THE FIRST META TABLE + return locateRegionInMeta(HConstants.ROOT_TABLE_NAME, + HConstants.META_TABLE_NAME, + HConstants.EMPTY_BYTE_ARRAY, + useCache, + metaRegionLock); } else { // Region not in the cache - have to go to the meta RS return locateRegionInMeta(HConstants.META_TABLE_NAME, tableName, row, @@ -1006,9 +1010,51 @@ public class HConnectionManager { } } + + private HRegionInfo resultToHRegionInfo(final Result result, + byte[] tableName, + byte[] parentTable) + throws IOException { + + if (result == null) { + throw new TableNotFoundException(Bytes.toString(tableName)); + } + + byte[] value = result.getValue(HConstants.CATALOG_FAMILY, + HConstants.REGIONINFO_QUALIFIER); + + if (value == null || value.length == 0) { + throw new IOException("HRegionInfo was null or empty in " + + Bytes.toString(parentTable) + + ", row=" + + result); + } + + HRegionInfo regionInfo = Writables.getHRegionInfo(value); + + // possible we got a region of a different table... + + if (!Bytes.equals(regionInfo.getTableName(), tableName)) { + String errorMsg = "Table '" + Bytes.toString(tableName) + "' was not found, got: "; + if (regionInfo != null) { + errorMsg += Bytes.toString(regionInfo.getTableName()) + "."; + } + throw new TableNotFoundException(errorMsg); + } + + return regionInfo; + } + private boolean finishedScanningForRegion(HRegionInfo regionInfo) { + if (regionInfo == null || regionInfo.isOffline() || regionInfo.isSplit()) { + return false; + } + return true; + } + /* * Search one of the meta tables (-ROOT- or .META.) for the HRegionLocation - * info that contains the table and row we're seeking. + * info that contains the table and row we're seeking. If the row is null or 0 length + * then return a key which scans to the first meta key for the table. */ private HRegionLocation locateRegionInMeta(final byte [] parentTable, final byte [] tableName, final byte [] row, boolean useCache, @@ -1024,17 +1070,13 @@ public class HConnectionManager { } } - // build the key of the meta region we should be looking for. - // the extra 9's on the end are necessary to allow "exact" matches - // without knowing the precise region names. 
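// ---------------------------------------------------------------------------
// Editorial sketch, not part of this patch: with getClosestRowBefore gone, a
// region lookup becomes a bounded forward scan of .META. between
// HTableDescriptor.getStartRow(tableName, row) and getStopRow(tableName),
// taking the first online region row (meta rows are now keyed by end key, so
// the first row at or after the start row covers the searched row). Names
// mirror the patch; the direct openScanner()/next() RPCs used in
// locateRegionInMeta() below are simplified here to an HTable scan.
static HRegionInfo findRegionInMeta(HTable meta, byte[] tableName, byte[] row)
    throws IOException {
  Scan scan = new Scan(HTableDescriptor.getStartRow(tableName, row),
      HTableDescriptor.getStopRow(tableName));
  scan.addFamily(HConstants.CATALOG_FAMILY);
  ResultScanner scanner = meta.getScanner(scan);
  try {
    for (Result r : scanner) {
      byte[] value = r.getValue(HConstants.CATALOG_FAMILY,
          HConstants.REGIONINFO_QUALIFIER);
      if (value == null || value.length == 0) continue;
      HRegionInfo hri = Writables.getHRegionInfo(value);
      if (!hri.isOffline()) {
        return hri; // first live region whose end key is >= the searched row
      }
    }
  } finally {
    scanner.close();
  }
  throw new TableNotFoundException(Bytes.toString(tableName));
}
// ---------------------------------------------------------------------------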
- byte [] metaKey = HRegionInfo.createRegionName(tableName, row, - HConstants.NINES, false); + final byte [] metaKey = HTableDescriptor.getStartRow(tableName, row); for (int tries = 0; true; tries++) { if (tries >= numRetries) { throw new NoServerForRegionException("Unable to find region for " + Bytes.toStringBinary(row) + " after " + numRetries + " tries."); } - + Result regionInfoRow = null; HRegionLocation metaLocation = null; try { // locate the root or meta region @@ -1044,10 +1086,11 @@ public class HConnectionManager { HRegionInterface server = getHRegionConnection(metaLocation.getHostname(), metaLocation.getPort()); - Result regionInfoRow = null; // This block guards against two threads trying to load the meta // region at the same time. The first will load the meta region and // the second will use the value that the first one found. + + HRegionInfo regionInfo = null; synchronized (regionLockObject) { // If the parent table is META, we may want to pre-fetch some // region info into the global region cache for this table. @@ -1069,29 +1112,42 @@ public class HConnectionManager { deleteCachedLocation(tableName, row); } - // Query the root or meta region for the location of the meta region - regionInfoRow = server.getClosestRowBefore( - metaLocation.getRegionInfo().getRegionName(), metaKey, - HConstants.CATALOG_FAMILY); - } - if (regionInfoRow == null) { - throw new TableNotFoundException(Bytes.toString(tableName)); - } - byte [] value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER); - if (value == null || value.length == 0) { - throw new IOException("HRegionInfo was null or empty in " + - Bytes.toString(parentTable) + ", row=" + regionInfoRow); - } - // convert the row result into the HRegionLocation we need! - HRegionInfo regionInfo = (HRegionInfo) Writables.getWritable( - value, new HRegionInfo()); - // possible we got a region of a different table... - if (!Bytes.equals(regionInfo.getTableName(), tableName)) { - throw new TableNotFoundException( - "Table '" + Bytes.toString(tableName) + "' was not found, got: " + - Bytes.toString(regionInfo.getTableName()) + "."); + + byte[] stopRow = HTableDescriptor.getStopRow(tableName); + Scan scan = new Scan(metaKey).addFamily(HConstants.CATALOG_FAMILY); + byte[] regionName = metaLocation.getRegionInfo().getRegionName(); + long scannerId = server.openScanner(regionName, scan); + + Result result; + do { + result = server.next(scannerId); + if (result != null ){ + regionInfoRow = result; + regionInfo = resultToHRegionInfo(regionInfoRow, + tableName, + parentTable); + if (! 
regionInfo.isOffline()) { + break; + } + } + } while (result != null); + + // We haven't cleared the meta entry out of the table yet + if (result == null) { + throw new TableNotFoundException("Table '" + Bytes.toString(tableName) + + " we searched for the StartKey: " + Bytes.toString(metaKey) + + " startKey lastChar's int value: " + (int) metaKey[metaKey.length -3] + + " with the stopKey: " + Bytes.toString(stopRow) + + " stopRow lastChar's int value: " + (int) stopRow[stopRow.length -3] + + " with parentTable:" + Bytes.toString(parentTable)); + } + } + if (regionInfo == null) { + throw new TableNotFoundException("Table '" + + Bytes.toString(tableName) + "' was not found, got: " + + Bytes.toString(regionInfo.getTableName()) + "."); } + if (regionInfo.isSplit()) { throw new RegionOfflineException("the only available region for" + " the required row is a split parent," + @@ -1104,11 +1160,11 @@ public class HConnectionManager { regionInfo.getRegionNameAsString()); } - value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY, - HConstants.SERVER_QUALIFIER); String hostAndPort = ""; - if (value != null) { - hostAndPort = Bytes.toString(value); + if (regionInfoRow.getValue(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER) != null) { + hostAndPort = Bytes.toString(regionInfoRow.getValue(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER)); } if (hostAndPort.equals("")) { throw new NoServerForRegionException("No server address listed " + diff --git src/main/java/org/apache/hadoop/hbase/client/HTable.java src/main/java/org/apache/hadoop/hbase/client/HTable.java index 8e7d7f7..e27b7e8 100644 --- src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -615,20 +615,6 @@ public class HTable implements HTableInterface { return allRegions; } - /** - * {@inheritDoc} - */ - @Override - public Result getRowOrBefore(final byte[] row, final byte[] family) - throws IOException { - return new ServerCallable(connection, tableName, row, operationTimeout) { - public Result call() throws IOException { - return server.getClosestRowBefore(location.getRegionInfo().getRegionName(), - row, family); - } - }.withRetries(); - } - /** * {@inheritDoc} */ diff --git src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java index 04150ad..348380a 100644 --- src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java +++ src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java @@ -134,23 +134,6 @@ public interface HTableInterface extends Closeable { Result[] get(List gets) throws IOException; /** - * Return the row that matches row exactly, - * or the one that immediately precedes it. - * - * @param row A row key. - * @param family Column family to include in the {@link Result}. - * @throws IOException if a remote or network exception occurs. - * @since 0.20.0 - * - * @deprecated As of version 0.92 this method is deprecated without - * replacement. - * getRowOrBefore is used internally to find entries in .META. and makes - * various assumptions about the table (which are true for .META. but not - * in general) to be efficient. - */ - Result getRowOrBefore(byte[] row, byte[] family) throws IOException; - - /** * Returns a scanner on the current table as specified by the {@link Scan} * object. 
* Note that the passed {@link Scan}'s start row and caching properties diff --git src/main/java/org/apache/hadoop/hbase/client/HTablePool.java src/main/java/org/apache/hadoop/hbase/client/HTablePool.java index 47381f4..9ac6735 100755 --- src/main/java/org/apache/hadoop/hbase/client/HTablePool.java +++ src/main/java/org/apache/hadoop/hbase/client/HTablePool.java @@ -376,11 +376,6 @@ public class HTablePool implements Closeable { } @Override - public Result getRowOrBefore(byte[] row, byte[] family) throws IOException { - return table.getRowOrBefore(row, family); - } - - @Override public ResultScanner getScanner(Scan scan) throws IOException { return table.getScanner(scan); } diff --git src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java index f404999..032f90c 100644 --- src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -33,8 +33,8 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Writables; @@ -143,44 +143,15 @@ public class MetaScanner { // if row is not null, we want to use the startKey of the row's region as // the startRow for the meta scan. byte[] startRow; - if (row != null) { - // Scan starting at a particular row in a particular table - assert tableName != null; - byte[] searchRow = - HRegionInfo.createRegionName(tableName, row, HConstants.NINES, - false); - HTable metaTable = null; - try { - metaTable = new HTable(configuration, HConstants.META_TABLE_NAME); - Result startRowResult = metaTable.getRowOrBefore(searchRow, - HConstants.CATALOG_FAMILY); - if (startRowResult == null) { - throw new TableNotFoundException("Cannot find row in .META. 
for table: " - + Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow)); - } - byte[] value = startRowResult.getValue(HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER); - if (value == null || value.length == 0) { - throw new IOException("HRegionInfo was null or empty in Meta for " + - Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow)); - } - HRegionInfo regionInfo = Writables.getHRegionInfo(value); - - byte[] rowBefore = regionInfo.getStartKey(); - startRow = HRegionInfo.createRegionName(tableName, rowBefore, - HConstants.ZEROES, false); - } finally { - if (metaTable != null) { - metaTable.close(); - } - } - } else if (tableName == null || tableName.length == 0) { + byte[] stopRow; + + if (tableName == null || tableName.length == 0) { // Full META scan startRow = HConstants.EMPTY_START_ROW; + stopRow = null; } else { - // Scan META for an entire table - startRow = HRegionInfo.createRegionName( - tableName, HConstants.EMPTY_START_ROW, HConstants.ZEROES, false); + startRow = HTableDescriptor.getStartRow(tableName, row); + stopRow = HTableDescriptor.getStopRow(tableName); } // Scan over each meta region @@ -188,8 +159,15 @@ public class MetaScanner { int rows = Math.min(rowLimit, configuration.getInt( HConstants.HBASE_META_SCANNER_CACHING, HConstants.DEFAULT_HBASE_META_SCANNER_CACHING)); + do { - final Scan scan = new Scan(startRow).addFamily(HConstants.CATALOG_FAMILY); + Scan scan; + if (stopRow != null) { // Support full meta scans + scan = new Scan(startRow, stopRow).addFamily(HConstants.CATALOG_FAMILY); + } else { + scan = new Scan(startRow).addFamily(HConstants.CATALOG_FAMILY); + } + if (LOG.isDebugEnabled()) { LOG.debug("Scanning " + Bytes.toString(metaTableName) + " starting at row=" + Bytes.toStringBinary(startRow) + " for max=" + diff --git src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java index 197eb71..dc141a7 100644 --- src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java +++ src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java @@ -105,18 +105,6 @@ public abstract class BaseRegionObserver implements RegionObserver { final Store store, final StoreFile resultFile) { } @Override - public void preGetClosestRowBefore(final ObserverContext e, - final byte [] row, final byte [] family, final Result result) - throws IOException { - } - - @Override - public void postGetClosestRowBefore(final ObserverContext e, - final byte [] row, final byte [] family, final Result result) - throws IOException { - } - - @Override public void preGet(final ObserverContext e, final Get get, final List results) throws IOException { } diff --git src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index 18c13c4..35dfc70 100644 --- src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -369,11 +369,6 @@ public abstract class CoprocessorHost { } } - public Result getRowOrBefore(byte[] row, byte[] family) - throws IOException { - return table.getRowOrBefore(row, family); - } - public Result get(Get get) throws IOException { return table.get(get); } diff --git src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 30c61ca..5b4f42d 100644 --- 
src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -166,40 +166,6 @@ public interface RegionObserver extends Coprocessor { boolean abortRequested); /** - * Called before a client makes a GetClosestRowBefore request. - *
- * Call CoprocessorEnvironment#bypass to skip default actions - *
- * Call CoprocessorEnvironment#complete to skip any subsequent chained - * coprocessors - * @param c the environment provided by the region server - * @param row the row - * @param family the family - * @param result The result to return to the client if default processing - * is bypassed. Can be modified. Will not be used if default processing - * is not bypassed. - * @throws IOException if an error occurred on the coprocessor - */ - void preGetClosestRowBefore(final ObserverContext c, - final byte [] row, final byte [] family, final Result result) - throws IOException; - - /** - * Called after a client makes a GetClosestRowBefore request. - *
- * Call CoprocessorEnvironment#complete to skip any subsequent chained - * coprocessors - * @param c the environment provided by the region server - * @param row the row - * @param family the desired family - * @param result the result to return to the client, modify as necessary - * @throws IOException if an error occurred on the coprocessor - */ - void postGetClosestRowBefore(final ObserverContext c, - final byte [] row, final byte [] family, final Result result) - throws IOException; - - /** * Called before the client performs a Get *
* Call CoprocessorEnvironment#bypass to skip default actions diff --git src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java index 757f98e..d501c1a 100644 --- src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java +++ src/main/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java @@ -135,20 +135,6 @@ public interface HRegionInterface extends VersionedProtocol, Stoppable, Abortabl throws IllegalArgumentException; /** - * Return all the data for the row that matches row exactly, - * or the one that immediately preceeds it. - * - * @param regionName region name - * @param row row key - * @param family Column family to look for row in. - * @return map of values - * @throws IOException e - */ - public Result getClosestRowBefore(final byte [] regionName, - final byte [] row, final byte [] family) - throws IOException; - - /** * Perform Get operation. * @param regionName name of region to get from * @param get Get operation diff --git src/main/java/org/apache/hadoop/hbase/master/HMaster.java src/main/java/org/apache/hadoop/hbase/master/HMaster.java index dbc9251..4562256 100644 --- src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.catalog.MetaMigratev2; import org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.MetaScanner; @@ -563,17 +564,32 @@ Server { status.setStatus("Splitting logs after master startup"); splitLogAfterStartup(this.fileSystemManager, onlineServers); - // Make sure root and meta assigned before proceeding. - if (!assignRootAndMeta(status)) return; serverShutdownHandlerEnabled = true; this.serverManager.expireDeadNotExpiredServers(); - // Update meta with new HRI if required. i.e migrate all HRI with HTD to - // HRI with out HTD in meta and update the status in ROOT. This must happen - // before we assign all user regions or else the assignment will fail. - // TODO: Remove this when we do 0.94. - org.apache.hadoop.hbase.catalog.MetaMigrationRemovingHTD. - updateMetaWithNewHRI(this); + MetaMigratev2 metaMigratev2 = new MetaMigratev2(this); + int version = metaMigratev2.getVersion(); + // TODO: Remove this when we do 1.0 + if (version < HConstants.FILE_SYSTEM_VERSION_INT ){ + boolean rit = this.assignmentManager. + processRegionInTransitionAndBlockUntilAssigned( + HRegionInfo.ROOT_REGIONINFO); + + // Make sure root and meta assigned before proceeding. + metaMigratev2.updateAndOnlineRoot(); + + rit = this.assignmentManager. + processRegionInTransitionAndBlockUntilAssigned( + HRegionInfo.FIRST_META_REGIONINFO); + + metaMigratev2.updateAndOnlineMeta(); + + // Update meta with new HRI if required. i.e migrate all HRI with HTD to + // HRI with out HTD in meta and update the status in ROOT. This must happen + // before we assign all user regions or else the assignment will fail. 
+ + } + assignRootAndMeta(status); // Fixup assignment manager status status.setStatus("Starting assignment manager"); diff --git src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java index 2ec6677..040ab38 100644 --- src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java +++ src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java @@ -463,7 +463,7 @@ public class ServerShutdownHandler extends EventHandler { return false; } // If our start rows do not compare, move on. - if (!Bytes.equals(daughter.getStartKey(), hri.getStartKey())) { + if (!Bytes.equals(daughter.getEndKey(), hri.getEndKey())) { return false; } // Else, table name and start rows compare. It means that the daughter diff --git src/main/java/org/apache/hadoop/hbase/migration/HRegionInfo090x2.java src/main/java/org/apache/hadoop/hbase/migration/HRegionInfo090x2.java new file mode 100644 index 0000000..7e70acb --- /dev/null +++ src/main/java/org/apache/hadoop/hbase/migration/HRegionInfo090x2.java @@ -0,0 +1,763 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.migration; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.JenkinsHash; +import org.apache.hadoop.hbase.util.MD5Hash; +import org.apache.hadoop.io.VersionedWritable; +import org.apache.hadoop.io.WritableComparable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.EOFException; +import java.io.IOException; +import java.util.Arrays; + +/** + * This contains the newest old HRegionInfo class. It's used for migration. + * Migration happens via the writables interface. + */ +public class HRegionInfo090x2 extends VersionedWritable + implements WritableComparable { + // VERSION == 0 when HRegionInfo had an HTableDescriptor inside it. + public static final byte VERSION_PRE_092 = 0; + public static final byte VERSION = 1; + private static final Log LOG = LogFactory.getLog(HRegionInfo090x2.class); + + /** + * The new format for a region name contains its encodedName at the end. + * The encoded name also serves as the directory name for the region + * in the filesystem. + * + * New region name format: + * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. 
+ * where, + * <encodedName> is a hex version of the MD5 hash of + * <tablename>,<startkey>,<regionIdTimestamp> + * + * The old region name format: + * <tablename>,<startkey>,<regionIdTimestamp> + * For region names in the old format, the encoded name is a 32-bit + * JenkinsHash integer value (in its decimal notation, string form). + *
+ * **NOTE** + * + * ROOT, the first META region, and regions created by an older + * version of HBase (0.20 or prior) will continue to use the + * old region name format. + */ + + /** Separator used to demarcate the encodedName in a region name + * in the new format. See description on new format above. + */ + private static final int ENC_SEPARATOR = '.'; + public static final int MD5_HEX_LENGTH = 32; + + /** + * Does region name contain its encoded name? + * @param regionName region name + * @return boolean indicating if this a new format region + * name which contains its encoded name. + */ + private static boolean hasEncodedName(final byte[] regionName) { + // check if region name ends in ENC_SEPARATOR + if ((regionName.length >= 1) + && (regionName[regionName.length - 1] == ENC_SEPARATOR)) { + // region name is new format. it contains the encoded name. + return true; + } + return false; + } + + /** + * @param regionName + * @return the encodedName + */ + public static String encodeRegionName(final byte [] regionName) { + String encodedName; + if (hasEncodedName(regionName)) { + // region is in new format: + // ,,/encodedName/ + encodedName = Bytes.toString(regionName, + regionName.length - MD5_HEX_LENGTH - 1, + MD5_HEX_LENGTH); + } else { + // old format region name. ROOT and first META region also + // use this format.EncodedName is the JenkinsHash value. + int hashVal = Math.abs(JenkinsHash.getInstance().hash(regionName, + regionName.length, 0)); + encodedName = String.valueOf(hashVal); + } + return encodedName; + } + + /** + * Use logging. + * @param encodedRegionName The encoded regionname. + * @return -ROOT- if passed 70236052 or + * .META. if passed 1028785192 else returns + * encodedRegionName + */ + public static String prettyPrint(final String encodedRegionName) { + if (encodedRegionName.equals("70236052")) { + return encodedRegionName + "/-ROOT-"; + } else if (encodedRegionName.equals("1028785192")) { + return encodedRegionName + "/.META."; + } + return encodedRegionName; + } + + /** delimiter used between portions of a region name */ + public static final int DELIMITER = ','; + + /** HRegionInfo for root region */ + public static final HRegionInfo090x2 ROOT_REGIONINFO = + new HRegionInfo090x2(0L, Bytes.toBytes("-ROOT-")); + + /** HRegionInfo for first meta region */ + public static final HRegionInfo090x2 FIRST_META_REGIONINFO = + new HRegionInfo090x2(1L, Bytes.toBytes(".META.")); + + private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY; + // This flag is in the parent of a split while the parent is still referenced + // by daughter regions. We USED to set this flag when we disabled a table + // but now table state is kept up in zookeeper as of 0.90.0 HBase. + private boolean offLine = false; + private long regionId = -1; + private transient byte [] regionName = HConstants.EMPTY_BYTE_ARRAY; + private String regionNameStr = ""; + private boolean split = false; + private byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; + private int hashCode = -1; + //TODO: Move NO_HASH to HStoreFile which is really the only place it is used. 
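As a reading aid for the region name format described in the class comment above, the following self-contained sketch shows what a new-format name looks like; the table name, start key and region id are made up for illustration, and it only calls createRegionName/encodeRegionName as declared in this migration class:

import org.apache.hadoop.hbase.migration.HRegionInfo090x2;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionNameFormatExample {
  public static void main(String[] args) {
    // Hypothetical inputs: table "t1", start key "row100", region id 1300000000000L.
    byte[] name = HRegionInfo090x2.createRegionName(
        Bytes.toBytes("t1"), Bytes.toBytes("row100"), 1300000000000L, true);
    // New format: t1,row100,1300000000000.<32-char hex MD5>.
    System.out.println(Bytes.toStringBinary(name));
    // The encoded name is the MD5 portion just before the trailing '.' separator.
    System.out.println(HRegionInfo090x2.encodeRegionName(name));
  }
}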
+ public static final String NO_HASH = null; + private volatile String encodedName = NO_HASH; + private byte [] encodedNameAsBytes = null; + + // Current TableName + private byte[] tableName = null; + + private void setHashCode() { + int result = Arrays.hashCode(this.regionName); + result ^= this.regionId; + result ^= Arrays.hashCode(this.startKey); + result ^= Arrays.hashCode(this.endKey); + result ^= Boolean.valueOf(this.offLine).hashCode(); + result ^= Arrays.hashCode(this.tableName); + this.hashCode = result; + } + + + /** + * Private constructor used constructing HRegionInfo for the catalog root and + * first meta regions + */ + private HRegionInfo090x2(long regionId, byte[] tableName) { + super(); + this.regionId = regionId; + this.tableName = tableName.clone(); + // Note: Root & First Meta regions names are still in old format + this.regionName = createRegionName(tableName, null, + regionId, false); + this.regionNameStr = Bytes.toStringBinary(this.regionName); + setHashCode(); + } + + /** Default constructor - creates empty object */ + public HRegionInfo090x2() { + super(); + } + + public HRegionInfo090x2(final byte[] tableName) { + this(tableName, null, null); + } + + /** + * Construct HRegionInfo with explicit parameters + * + * @param tableName the table name + * @param startKey first key in region + * @param endKey end of key range + * @throws IllegalArgumentException + */ + public HRegionInfo090x2(final byte[] tableName, final byte[] startKey, + final byte[] endKey) + throws IllegalArgumentException { + this(tableName, startKey, endKey, false); + } + + + /** + * Construct HRegionInfo with explicit parameters + * + * @param tableName the table descriptor + * @param startKey first key in region + * @param endKey end of key range + * @param split true if this region has split and we have daughter regions + * regions that may or may not hold references to this region. + * @throws IllegalArgumentException + */ + public HRegionInfo090x2(final byte[] tableName, final byte[] startKey, + final byte[] endKey, final boolean split) + throws IllegalArgumentException { + this(tableName, startKey, endKey, split, System.currentTimeMillis()); + } + + + /** + * Construct HRegionInfo with explicit parameters + * + * @param tableName the table descriptor + * @param startKey first key in region + * @param endKey end of key range + * @param split true if this region has split and we have daughter regions + * regions that may or may not hold references to this region. + * @param regionid Region id to use. + * @throws IllegalArgumentException + */ + public HRegionInfo090x2(final byte[] tableName, final byte[] startKey, + final byte[] endKey, final boolean split, final long regionid) + throws IllegalArgumentException { + + super(); + if (tableName == null) { + throw new IllegalArgumentException("tableName cannot be null"); + } + this.tableName = tableName.clone(); + this.offLine = false; + this.regionId = regionid; + + this.regionName = createRegionName(this.tableName, startKey, regionId, true); + + this.regionNameStr = Bytes.toStringBinary(this.regionName); + this.split = split; + this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone(); + this.startKey = startKey == null? 
+ HConstants.EMPTY_START_ROW: startKey.clone(); + this.tableName = tableName.clone(); + setHashCode(); + } + + /** + * Costruct a copy of another HRegionInfo + * + * @param other + */ + public HRegionInfo090x2(HRegionInfo090x2 other) { + super(); + this.endKey = other.getEndKey(); + this.offLine = other.isOffline(); + this.regionId = other.getRegionId(); + this.regionName = other.getRegionName(); + this.regionNameStr = Bytes.toStringBinary(this.regionName); + this.split = other.isSplit(); + this.startKey = other.getStartKey(); + this.hashCode = other.hashCode(); + this.encodedName = other.getEncodedName(); + this.tableName = other.tableName; + } + + + /** + * Make a region name of passed parameters. + * @param tableName + * @param startKey Can be null + * @param regionid Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format + * (such that it contains its encoded name?). + * @return Region name made of passed tableName, startKey and id + */ + public static byte [] createRegionName(final byte [] tableName, + final byte [] startKey, final long regionid, boolean newFormat) { + return createRegionName(tableName, startKey, Long.toString(regionid), newFormat); + } + + /** + * Make a region name of passed parameters. + * @param tableName + * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format + * (such that it contains its encoded name?). + * @return Region name made of passed tableName, startKey and id + */ + public static byte [] createRegionName(final byte [] tableName, + final byte [] startKey, final String id, boolean newFormat) { + return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat); + } + + /** + * Make a region name of passed parameters. + * @param tableName + * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format + * (such that it contains its encoded name?). + * @return Region name made of passed tableName, startKey and id + */ + public static byte [] createRegionName(final byte [] tableName, + final byte [] startKey, final byte [] id, boolean newFormat) { + byte [] b = new byte [tableName.length + 2 + id.length + + (startKey == null? 0: startKey.length) + + (newFormat ? (MD5_HEX_LENGTH + 2) : 0)]; + + int offset = tableName.length; + System.arraycopy(tableName, 0, b, 0, offset); + b[offset++] = DELIMITER; + if (startKey != null && startKey.length > 0) { + System.arraycopy(startKey, 0, b, offset, startKey.length); + offset += startKey.length; + } + b[offset++] = DELIMITER; + System.arraycopy(id, 0, b, offset, id.length); + offset += id.length; + + if (newFormat) { + // + // Encoded name should be built into the region name. + // + // Use the region name thus far (namely, ,,) + // to compute a MD5 hash to be used as the encoded name, and append + // it to the byte buffer. + // + String md5Hash = MD5Hash.getMD5AsHex(b, 0, offset); + byte [] md5HashBytes = Bytes.toBytes(md5Hash); + + if (md5HashBytes.length != MD5_HEX_LENGTH) { + LOG.error("MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH + + "; Got=" + md5HashBytes.length); + } + + // now append the bytes '..' 
to the end + b[offset++] = ENC_SEPARATOR; + System.arraycopy(md5HashBytes, 0, b, offset, MD5_HEX_LENGTH); + offset += MD5_HEX_LENGTH; + b[offset++] = ENC_SEPARATOR; + } + + return b; + } + + /** + * Gets the table name from the specified region name. + * @param regionName + * @return Table name. + */ + public static byte [] getTableName(byte [] regionName) { + int offset = -1; + for (int i = 0; i < regionName.length; i++) { + if (regionName[i] == DELIMITER) { + offset = i; + break; + } + } + byte [] tableName = new byte[offset]; + System.arraycopy(regionName, 0, tableName, 0, offset); + return tableName; + } + + /** + * Separate elements of a regionName. + * @param regionName + * @return Array of byte[] containing tableName, startKey and id + * @throws IOException + */ + public static byte [][] parseRegionName(final byte [] regionName) + throws IOException { + int offset = -1; + for (int i = 0; i < regionName.length; i++) { + if (regionName[i] == DELIMITER) { + offset = i; + break; + } + } + if(offset == -1) throw new IOException("Invalid regionName format"); + byte [] tableName = new byte[offset]; + System.arraycopy(regionName, 0, tableName, 0, offset); + offset = -1; + for (int i = regionName.length - 1; i > 0; i--) { + if(regionName[i] == DELIMITER) { + offset = i; + break; + } + } + if(offset == -1) throw new IOException("Invalid regionName format"); + byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; + if(offset != tableName.length + 1) { + startKey = new byte[offset - tableName.length - 1]; + System.arraycopy(regionName, tableName.length + 1, startKey, 0, + offset - tableName.length - 1); + } + byte [] id = new byte[regionName.length - offset - 1]; + System.arraycopy(regionName, offset + 1, id, 0, + regionName.length - offset - 1); + byte [][] elements = new byte[3][]; + elements[0] = tableName; + elements[1] = startKey; + elements[2] = id; + return elements; + } + + /** @return the regionId */ + public long getRegionId(){ + return regionId; + } + + /** + * @return the regionName as an array of bytes. + * @see #getRegionNameAsString() + */ + public byte [] getRegionName(){ + return regionName; + } + + /** + * @return Region name as a String for use in logging, etc. + */ + public String getRegionNameAsString() { + if (hasEncodedName(this.regionName)) { + // new format region names already have their encoded name. + return this.regionNameStr; + } + + // old format. regionNameStr doesn't have the region name. + // + // + return this.regionNameStr + "." 
+ this.getEncodedName(); + } + + /** @return the encoded region name */ + public synchronized String getEncodedName() { + if (this.encodedName == NO_HASH) { + this.encodedName = encodeRegionName(this.regionName); + } + return this.encodedName; + } + + public synchronized byte [] getEncodedNameAsBytes() { + if (this.encodedNameAsBytes == null) { + this.encodedNameAsBytes = Bytes.toBytes(getEncodedName()); + } + return this.encodedNameAsBytes; + } + + /** @return the startKey */ + public byte [] getStartKey(){ + return startKey; + } + + /** @return the endKey */ + public byte [] getEndKey(){ + return endKey; + } + + /** + * Get current table name of the region + * @return byte array of table name + */ + public byte[] getTableName() { + if (tableName == null || tableName.length == 0) { + tableName = getTableName(getRegionName()); + } + return tableName; + } + + /** + * Get current table name as string + * @return string representation of current table + */ + public String getTableNameAsString() { + return Bytes.toString(tableName); + } + + /** + * Returns true if the given inclusive range of rows is fully contained + * by this region. For example, if the region is foo,a,g and this is + * passed ["b","c"] or ["a","c"] it will return true, but if this is passed + * ["b","z"] it will return false. + * @throws IllegalArgumentException if the range passed is invalid (ie end < start) + */ + public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { + if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) { + throw new IllegalArgumentException( + "Invalid range: " + Bytes.toStringBinary(rangeStartKey) + + " > " + Bytes.toStringBinary(rangeEndKey)); + } + + boolean firstKeyInRange = Bytes.compareTo(rangeStartKey, startKey) >= 0; + boolean lastKeyInRange = + Bytes.compareTo(rangeEndKey, endKey) < 0 || + Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); + return firstKeyInRange && lastKeyInRange; + } + + /** + * Return true if the given row falls in this region. 
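The containsRange/containsRow contract documented above can be checked with a short sketch; the region boundaries mirror the hypothetical foo,a,g example from the javadoc, and the calls are the methods defined in this migration class:

import org.apache.hadoop.hbase.migration.HRegionInfo090x2;
import org.apache.hadoop.hbase.util.Bytes;

public class ContainsRangeExample {
  public static void main(String[] args) {
    // Hypothetical region of table "foo" spanning [a, g).
    HRegionInfo090x2 hri = new HRegionInfo090x2(
        Bytes.toBytes("foo"), Bytes.toBytes("a"), Bytes.toBytes("g"));
    System.out.println(hri.containsRange(Bytes.toBytes("b"), Bytes.toBytes("c"))); // true
    System.out.println(hri.containsRange(Bytes.toBytes("a"), Bytes.toBytes("c"))); // true
    System.out.println(hri.containsRange(Bytes.toBytes("b"), Bytes.toBytes("z"))); // false: "z" is past the end key
    System.out.println(hri.containsRow(Bytes.toBytes("f")));                       // true
    System.out.println(hri.containsRow(Bytes.toBytes("g")));                       // false: the end key is exclusive
  }
}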
+ */ + public boolean containsRow(byte[] row) { + return Bytes.compareTo(row, startKey) >= 0 && + (Bytes.compareTo(row, endKey) < 0 || + Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); + } + + /** + * @return the tableDesc + * @deprecated Do not use; expensive call + * use HRegionInfo.getTableNameAsString() in place of + * HRegionInfo.getTableDesc().getNameAsString() + */ + @Deprecated + public HTableDescriptor getTableDesc() { + Configuration c = HBaseConfiguration.create(); + FileSystem fs; + try { + fs = FileSystem.get(c); + } catch (IOException e) { + throw new RuntimeException(e); + } + FSTableDescriptors fstd = + new FSTableDescriptors(fs, new Path(c.get(HConstants.HBASE_DIR))); + try { + return fstd.get(this.tableName); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * @param newDesc new table descriptor to use + * @deprecated Do not use; expensive call + */ + @Deprecated + public void setTableDesc(HTableDescriptor newDesc) { + Configuration c = HBaseConfiguration.create(); + FileSystem fs; + try { + fs = FileSystem.get(c); + } catch (IOException e) { + throw new RuntimeException(e); + } + FSTableDescriptors fstd = + new FSTableDescriptors(fs, new Path(c.get(HConstants.HBASE_DIR))); + try { + fstd.add(newDesc); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** @return true if this is the root region */ + public boolean isRootRegion() { + return Bytes.equals(tableName, HRegionInfo090x2.ROOT_REGIONINFO.getTableName()); + } + + /** @return true if this region is from a table that is a meta table, + * either .META. or -ROOT- + */ + public boolean isMetaTable() { + return isRootRegion() || isMetaRegion(); + } + + /** @return true if this region is a meta region */ + public boolean isMetaRegion() { + return Bytes.equals(tableName, HRegionInfo090x2.FIRST_META_REGIONINFO.getTableName()); + } + + /** + * @return True if has been split and has daughters. + */ + public boolean isSplit() { + return this.split; + } + + /** + * @param split set split status + */ + public void setSplit(boolean split) { + this.split = split; + } + + /** + * @return True if this region is offline. + */ + public boolean isOffline() { + return this.offLine; + } + + /** + * The parent of a region split is offline while split daughters hold + * references to the parent. Offlined regions are closed. + * @param offLine Set online/offline status. + */ + public void setOffline(boolean offLine) { + this.offLine = offLine; + } + + + /** + * @return True if this is a split parent region. + */ + public boolean isSplitParent() { + if (!isSplit()) return false; + if (!isOffline()) { + LOG.warn("Region is split but NOT offline: " + getRegionNameAsString()); + } + return true; + } + + /** + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + return "{" + HConstants.NAME + " => '" + + this.regionNameStr + + "', STARTKEY => '" + + Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + + Bytes.toStringBinary(this.endKey) + + "', ENCODED => " + getEncodedName() + "," + + (isOffline()? " OFFLINE => true,": "") + + (isSplit()? 
" SPLIT => true,": "") + "}"; + } + + /** + * @see java.lang.Object#equals(java.lang.Object) + */ + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null) { + return false; + } + if (!(o instanceof HRegionInfo090x2)) { + return false; + } + return this.compareTo((HRegionInfo090x2)o) == 0; + } + + /** + * @see java.lang.Object#hashCode() + */ + @Override + public int hashCode() { + return this.hashCode; + } + + /** @return the object version number */ + @Override + public byte getVersion() { + return VERSION; + } + + // + // Writable + // + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + Bytes.writeByteArray(out, endKey); + out.writeBoolean(offLine); + out.writeLong(regionId); + Bytes.writeByteArray(out, regionName); + out.writeBoolean(split); + Bytes.writeByteArray(out, startKey); + Bytes.writeByteArray(out, tableName); + out.writeInt(hashCode); + } + + @Override + public void readFields(DataInput in) throws IOException { + // Read the single version byte. We don't ask the super class do it + // because freaks out if its not the current classes' version. This method + // can deserialize version 0 and version 1 of HRI. + byte version = in.readByte(); + this.endKey = Bytes.readByteArray(in); + this.offLine = in.readBoolean(); + this.regionId = in.readLong(); + this.regionName = Bytes.readByteArray(in); + this.regionNameStr = Bytes.toStringBinary(this.regionName); + this.split = in.readBoolean(); + this.startKey = Bytes.readByteArray(in); + this.tableName = Bytes.readByteArray(in); + this.hashCode = in.readInt(); + } + + // + // Comparable + // + + public int compareTo(HRegionInfo090x2 o) { + if (o == null) { + return 1; + } + + // Are regions of same table? + int result = Bytes.compareTo(this.tableName, o.tableName); + if (result != 0) { + return result; + } + + // Compare start keys. + result = Bytes.compareTo(this.startKey, o.startKey); + if (result != 0) { + return result; + } + + // Compare end keys. + result = Bytes.compareTo(this.endKey, o.endKey); + + if (result != 0) { + if (this.getStartKey().length != 0 + && this.getEndKey().length == 0) { + return 1; // this is last region + } + if (o.getStartKey().length != 0 + && o.getEndKey().length == 0) { + return -1; // o is the last region + } + return result; + } + if (this.offLine == o.offLine) + return 0; + if (this.offLine == true) return -1; + + return 1; + } + + /** + * @return Comparator to use comparing {@link KeyValue}s. + */ + public KVComparator getComparator() { + return isRootRegion()? KeyValue.ROOT_COMPARATOR: isMetaRegion()? + KeyValue.META_COMPARATOR: KeyValue.COMPARATOR; + } +} diff --git src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java deleted file mode 100644 index 8174cf5..0000000 --- src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Copyright 2009 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import java.util.NavigableMap; -import java.util.NavigableSet; -import java.util.TreeMap; -import java.util.TreeSet; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * State and utility processing {@link HRegion#getClosestRowBefore(byte[], byte[])}. - * Like {@link ScanDeleteTracker} and {@link ScanDeleteTracker} but does not - * implement the {@link DeleteTracker} interface since state spans rows (There - * is no update nor reset method). - */ -@InterfaceAudience.Private -class GetClosestRowBeforeTracker { - private final KeyValue targetkey; - // Any cell w/ a ts older than this is expired. - private final long oldestts; - private KeyValue candidate = null; - private final KVComparator kvcomparator; - // Flag for whether we're doing getclosest on a metaregion. - private final boolean metaregion; - // Offset and length into targetkey demarking table name (if in a metaregion). - private final int rowoffset; - private final int tablenamePlusDelimiterLength; - - // Deletes keyed by row. Comparator compares on row portion of KeyValue only. - private final NavigableMap> deletes; - - /** - * @param c - * @param kv Presume first on row: i.e. empty column, maximum timestamp and - * a type of Type.Maximum - * @param ttl Time to live in ms for this Store - * @param metaregion True if this is .META. or -ROOT- region. - */ - GetClosestRowBeforeTracker(final KVComparator c, final KeyValue kv, - final long ttl, final boolean metaregion) { - super(); - this.metaregion = metaregion; - this.targetkey = kv; - // If we are in a metaregion, then our table name is the prefix on the - // targetkey. - this.rowoffset = kv.getRowOffset(); - int l = -1; - if (metaregion) { - l = KeyValue.getDelimiter(kv.getBuffer(), rowoffset, kv.getRowLength(), - HRegionInfo.DELIMITER) - this.rowoffset; - } - this.tablenamePlusDelimiterLength = metaregion? l + 1: -1; - this.oldestts = System.currentTimeMillis() - ttl; - this.kvcomparator = c; - KeyValue.RowComparator rc = new KeyValue.RowComparator(this.kvcomparator); - this.deletes = new TreeMap>(rc); - } - - /** - * @param kv - * @return True if this kv is expired. - */ - boolean isExpired(final KeyValue kv) { - return Store.isExpired(kv, this.oldestts); - } - - /* - * Add the specified KeyValue to the list of deletes. - * @param kv - */ - private void addDelete(final KeyValue kv) { - NavigableSet rowdeletes = this.deletes.get(kv); - if (rowdeletes == null) { - rowdeletes = new TreeSet(this.kvcomparator); - this.deletes.put(kv, rowdeletes); - } - rowdeletes.add(kv); - } - - /* - * @param kv Adds candidate if nearer the target than previous candidate. - * @return True if updated candidate. 
- */ - private boolean addCandidate(final KeyValue kv) { - if (!isDeleted(kv) && isBetterCandidate(kv)) { - this.candidate = kv; - return true; - } - return false; - } - - boolean isBetterCandidate(final KeyValue contender) { - return this.candidate == null || - (this.kvcomparator.compareRows(this.candidate, contender) < 0 && - this.kvcomparator.compareRows(contender, this.targetkey) <= 0); - } - - /* - * Check if specified KeyValue buffer has been deleted by a previously - * seen delete. - * @param kv - * @return true is the specified KeyValue is deleted, false if not - */ - private boolean isDeleted(final KeyValue kv) { - if (this.deletes.isEmpty()) return false; - NavigableSet rowdeletes = this.deletes.get(kv); - if (rowdeletes == null || rowdeletes.isEmpty()) return false; - return isDeleted(kv, rowdeletes); - } - - /** - * Check if the specified KeyValue buffer has been deleted by a previously - * seen delete. - * @param kv - * @param ds - * @return True is the specified KeyValue is deleted, false if not - */ - public boolean isDeleted(final KeyValue kv, final NavigableSet ds) { - if (deletes == null || deletes.isEmpty()) return false; - for (KeyValue d: ds) { - long kvts = kv.getTimestamp(); - long dts = d.getTimestamp(); - if (d.isDeleteFamily()) { - if (kvts <= dts) return true; - continue; - } - // Check column - int ret = Bytes.compareTo(kv.getBuffer(), kv.getQualifierOffset(), - kv.getQualifierLength(), - d.getBuffer(), d.getQualifierOffset(), d.getQualifierLength()); - if (ret <= -1) { - // This delete is for an earlier column. - continue; - } else if (ret >= 1) { - // Beyond this kv. - break; - } - // Check Timestamp - if (kvts > dts) return false; - - // Check Type - switch (KeyValue.Type.codeToType(d.getType())) { - case Delete: return kvts == dts; - case DeleteColumn: return true; - default: continue; - } - } - return false; - } - - /* - * Handle keys whose values hold deletes. - * Add to the set of deletes and then if the candidate keys contain any that - * might match, then check for a match and remove it. Implies candidates - * is made with a Comparator that ignores key type. - * @param kv - * @return True if we removed k from candidates. - */ - boolean handleDeletes(final KeyValue kv) { - addDelete(kv); - boolean deleted = false; - if (!hasCandidate()) return deleted; - if (isDeleted(this.candidate)) { - this.candidate = null; - deleted = true; - } - return deleted; - } - - /** - * Do right thing with passed key, add to deletes or add to candidates. - * @param kv - * @return True if we added a candidate - */ - boolean handle(final KeyValue kv) { - if (kv.isDelete()) { - handleDeletes(kv); - return false; - } - return addCandidate(kv); - } - - /** - * @return True if has candidate - */ - public boolean hasCandidate() { - return this.candidate != null; - } - - /** - * @return Best candidate or null. - */ - public KeyValue getCandidate() { - return this.candidate; - } - - public KeyValue getTargetKey() { - return this.targetkey; - } - - /** - * @param kv Current kv - * @param First on row kv. - * @param state - * @return True if we went too far, past the target key. - */ - boolean isTooFar(final KeyValue kv, final KeyValue firstOnRow) { - return this.kvcomparator.compareRows(kv, firstOnRow) > 0; - } - - boolean isTargetTable(final KeyValue kv) { - if (!metaregion) return true; - // Compare start of keys row. Compare including delimiter. Saves having - // to calculate where tablename ends in the candidate kv. 
- return Bytes.compareTo(this.targetkey.getBuffer(), this.rowoffset, - this.tablenamePlusDelimiterLength, - kv.getBuffer(), kv.getRowOffset(), this.tablenamePlusDelimiterLength) == 0; - } -} \ No newline at end of file diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 02d55d4..fd72640 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -1578,62 +1578,6 @@ public class HRegion implements HeapSize { // , Writable{ // get() methods for client use. ////////////////////////////////////////////////////////////////////////////// /** - * Return all the data for the row that matches row exactly, - * or the one that immediately preceeds it, at or immediately before - * ts. - * - * @param row row key - * @return map of values - * @throws IOException - */ - Result getClosestRowBefore(final byte [] row) - throws IOException{ - return getClosestRowBefore(row, HConstants.CATALOG_FAMILY); - } - - /** - * Return all the data for the row that matches row exactly, - * or the one that immediately preceeds it, at or immediately before - * ts. - * - * @param row row key - * @param family column family to find on - * @return map of values - * @throws IOException read exceptions - */ - public Result getClosestRowBefore(final byte [] row, final byte [] family) - throws IOException { - if (coprocessorHost != null) { - Result result = new Result(); - if (coprocessorHost.preGetClosestRowBefore(row, family, result)) { - return result; - } - } - // look across all the HStores for this region and determine what the - // closest key is across all column families, since the data may be sparse - checkRow(row, "getClosestRowBefore"); - startRegionOperation(); - this.readRequestsCount.increment(); - try { - Store store = getStore(family); - // get the closest key. (HStore.getRowKeyAtOrBefore can return null) - KeyValue key = store.getRowKeyAtOrBefore(row); - Result result = null; - if (key != null) { - Get get = new Get(key.getRow()); - get.addFamily(family); - result = get(get, null); - } - if (coprocessorHost != null) { - coprocessorHost.postGetClosestRowBefore(row, family, result); - } - return result; - } finally { - closeRegionOperation(); - } - } - - /** * Return an iterator that scans over the HRegion, returning the indicated * columns and rows specified by the {@link Scan}. *
@@ -3196,6 +3140,7 @@ public class HRegion implements HeapSize { // , Writable{ byte[] familyName = p.getFirst(); String path = p.getSecond(); + Store store = getStore(familyName); if (store == null) { IOException ioe = new DoNotRetryIOException( diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index e0af8fb..c1365a2 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1934,22 +1934,6 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, return getRegion(regionName).getRegionInfo(); } - public Result getClosestRowBefore(final byte[] regionName, final byte[] row, - final byte[] family) throws IOException { - checkOpen(); - requestCount.incrementAndGet(); - try { - // locate the region we're operating on - HRegion region = getRegion(regionName); - // ask the region for all the data - - Result r = region.getClosestRowBefore(row, family); - return r; - } catch (Throwable t) { - throw convertThrowableToIOE(cleanup(t)); - } - } - /** {@inheritDoc} */ public Result get(byte[] regionName, Get get) throws IOException { checkOpen(); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java index 0592f40..08c346a 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -354,88 +354,6 @@ public class MemStore implements HeapSize { } /** - * @param state column/delete tracking state - */ - void getRowKeyAtOrBefore(final GetClosestRowBeforeTracker state) { - this.lock.readLock().lock(); - try { - getRowKeyAtOrBefore(kvset, state); - getRowKeyAtOrBefore(snapshot, state); - } finally { - this.lock.readLock().unlock(); - } - } - - /* - * @param set - * @param state Accumulates deletes and candidates. - */ - private void getRowKeyAtOrBefore(final NavigableSet set, - final GetClosestRowBeforeTracker state) { - if (set.isEmpty()) { - return; - } - if (!walkForwardInSingleRow(set, state.getTargetKey(), state)) { - // Found nothing in row. Try backing up. - getRowKeyBefore(set, state); - } - } - - /* - * Walk forward in a row from firstOnRow. Presumption is that - * we have been passed the first possible key on a row. As we walk forward - * we accumulate deletes until we hit a candidate on the row at which point - * we return. - * @param set - * @param firstOnRow First possible key on this row. - * @param state - * @return True if we found a candidate walking this row. - */ - private boolean walkForwardInSingleRow(final SortedSet set, - final KeyValue firstOnRow, final GetClosestRowBeforeTracker state) { - boolean foundCandidate = false; - SortedSet tail = set.tailSet(firstOnRow); - if (tail.isEmpty()) return foundCandidate; - for (Iterator i = tail.iterator(); i.hasNext();) { - KeyValue kv = i.next(); - // Did we go beyond the target row? If so break. - if (state.isTooFar(kv, firstOnRow)) break; - if (state.isExpired(kv)) { - i.remove(); - continue; - } - // If we added something, this row is a contender. break. - if (state.handle(kv)) { - foundCandidate = true; - break; - } - } - return foundCandidate; - } - - /* - * Walk backwards through the passed set a row at a time until we run out of - * set or until we get a candidate. 
- * @param set - * @param state - */ - private void getRowKeyBefore(NavigableSet set, - final GetClosestRowBeforeTracker state) { - KeyValue firstOnRow = state.getTargetKey(); - for (Member p = memberOfPreviousRow(set, state, firstOnRow); - p != null; p = memberOfPreviousRow(p.set, state, firstOnRow)) { - // Make sure we don't fall out of our table. - if (!state.isTargetTable(p.kv)) break; - // Stop looking if we've exited the better candidate range. - if (!state.isBetterCandidate(p.kv)) break; - // Make into firstOnRow - firstOnRow = new KeyValue(p.kv.getRow(), HConstants.LATEST_TIMESTAMP); - // If we find something, break; - if (walkForwardInSingleRow(p.set, firstOnRow, state)) break; - } - } - - /** * Given the specs of a column, update it, first by inserting a new record, * then removing the old one. Since there is only 1 KeyValue involved, the memstoreTS * will be set to 0, thus ensuring that they instantly appear to anyone. The underlying @@ -610,29 +528,6 @@ public class MemStore implements HeapSize { } } - /* - * @param set Set to walk back in. Pass a first in row or we'll return - * same row (loop). - * @param state Utility and context. - * @param firstOnRow First item on the row after the one we want to find a - * member in. - * @return Null or member of row previous to firstOnRow - */ - private Member memberOfPreviousRow(NavigableSet set, - final GetClosestRowBeforeTracker state, final KeyValue firstOnRow) { - NavigableSet head = set.headSet(firstOnRow, false); - if (head.isEmpty()) return null; - for (Iterator i = head.descendingIterator(); i.hasNext();) { - KeyValue found = i.next(); - if (state.isExpired(found)) { - i.remove(); - continue; - } - return new Member(head, found); - } - return null; - } - /** * @return scanner on memstore and snapshot in this order. 
*/ diff --git src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index a3850e5..34143a4 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -484,60 +484,6 @@ public class RegionCoprocessorHost // RegionObserver support /** - * @param row the row key - * @param family the family - * @param result the result set from the region - * @return true if default processing should be bypassed - * @exception IOException Exception - */ - public boolean preGetClosestRowBefore(final byte[] row, final byte[] family, - final Result result) throws IOException { - boolean bypass = false; - ObserverContext ctx = null; - for (RegionEnvironment env: coprocessors) { - if (env.getInstance() instanceof RegionObserver) { - ctx = ObserverContext.createAndPrepare(env, ctx); - try { - ((RegionObserver)env.getInstance()).preGetClosestRowBefore(ctx, row, - family, result); - } catch (Throwable e) { - handleCoprocessorThrowable(env, e); - } - bypass |= ctx.shouldBypass(); - if (ctx.shouldComplete()) { - break; - } - } - } - return bypass; - } - - /** - * @param row the row key - * @param family the family - * @param result the result set from the region - * @exception IOException Exception - */ - public void postGetClosestRowBefore(final byte[] row, final byte[] family, - final Result result) throws IOException { - ObserverContext ctx = null; - for (RegionEnvironment env: coprocessors) { - if (env.getInstance() instanceof RegionObserver) { - ctx = ObserverContext.createAndPrepare(env, ctx); - try { - ((RegionObserver)env.getInstance()).postGetClosestRowBefore(ctx, row, - family, result); - } catch (Throwable e) { - handleCoprocessorThrowable(env, e); - } - if (ctx.shouldComplete()) { - break; - } - } - } - } - - /** * @param get the Get request * @return true if default processing should be bypassed * @exception IOException Exception diff --git src/main/java/org/apache/hadoop/hbase/regionserver/Store.java src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index 0c7b396..eaca508 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -1623,99 +1623,6 @@ public class Store extends SchemaConfigured implements HeapSize { return key.getTimestamp() < oldestTimestamp; } - /** - * Find the key that matches row exactly, or the one that immediately - * precedes it. WARNING: Only use this method on a table where writes occur - * with strictly increasing timestamps. This method assumes this pattern of - * writes in order to make it reasonably performant. Also our search is - * dependent on the axiom that deletes are for cells that are in the container - * that follows whether a memstore snapshot or a storefile, not for the - * current container: i.e. we'll see deletes before we come across cells we - * are to delete. Presumption is that the memstore#kvset is processed before - * memstore#snapshot and so on. - * @param row The row key of the targeted row. - * @return Found keyvalue or null if none found. - * @throws IOException - */ - KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException { - // If minVersions is set, we will not ignore expired KVs. - // As we're only looking for the latest matches, that should be OK. 
- // With minVersions > 0 we guarantee that any KV that has any version - // at all (expired or not) has at least one version that will not expire. - // Note that this method used to take a KeyValue as arguments. KeyValue - // can be back-dated, a row key cannot. - long ttlToUse = scanInfo.getMinVersions() > 0 ? Long.MAX_VALUE : this.ttl; - - KeyValue kv = new KeyValue(row, HConstants.LATEST_TIMESTAMP); - - GetClosestRowBeforeTracker state = new GetClosestRowBeforeTracker( - this.comparator, kv, ttlToUse, this.region.getRegionInfo().isMetaRegion()); - this.lock.readLock().lock(); - try { - // First go to the memstore. Pick up deletes and candidates. - this.memstore.getRowKeyAtOrBefore(state); - // Check if match, if we got a candidate on the asked for 'kv' row. - // Process each store file. Run through from newest to oldest. - for (StoreFile sf : Lists.reverse(storefiles)) { - // Update the candidate keys from the current map file - rowAtOrBeforeFromStoreFile(sf, state); - } - return state.getCandidate(); - } finally { - this.lock.readLock().unlock(); - } - } - - /* - * Check an individual MapFile for the row at or before a given row. - * @param f - * @param state - * @throws IOException - */ - private void rowAtOrBeforeFromStoreFile(final StoreFile f, - final GetClosestRowBeforeTracker state) - throws IOException { - StoreFile.Reader r = f.getReader(); - if (r == null) { - LOG.warn("StoreFile " + f + " has a null Reader"); - return; - } - // TODO: Cache these keys rather than make each time? - byte [] fk = r.getFirstKey(); - KeyValue firstKV = KeyValue.createKeyValueFromKey(fk, 0, fk.length); - byte [] lk = r.getLastKey(); - KeyValue lastKV = KeyValue.createKeyValueFromKey(lk, 0, lk.length); - KeyValue firstOnRow = state.getTargetKey(); - if (this.comparator.compareRows(lastKV, firstOnRow) < 0) { - // If last key in file is not of the target table, no candidates in this - // file. Return. - if (!state.isTargetTable(lastKV)) return; - // If the row we're looking for is past the end of file, set search key to - // last key. TODO: Cache last and first key rather than make each time. - firstOnRow = new KeyValue(lastKV.getRow(), HConstants.LATEST_TIMESTAMP); - } - // Get a scanner that caches blocks and that uses pread. - HFileScanner scanner = r.getHFileReader().getScanner(true, true, false); - // Seek scanner. If can't seek it, return. - if (!seekToScanner(scanner, firstOnRow, firstKV)) return; - // If we found candidate on firstOnRow, just return. THIS WILL NEVER HAPPEN! - // Unlikely that there'll be an instance of actual first row in table. - if (walkForwardInSingleRow(scanner, firstOnRow, state)) return; - // If here, need to start backing up. - while (scanner.seekBefore(firstOnRow.getBuffer(), firstOnRow.getKeyOffset(), - firstOnRow.getKeyLength())) { - KeyValue kv = scanner.getKeyValue(); - if (!state.isTargetTable(kv)) break; - if (!state.isBetterCandidate(kv)) break; - // Make new first on row. - firstOnRow = new KeyValue(kv.getRow(), HConstants.LATEST_TIMESTAMP); - // Seek scanner. If can't seek it, break. - if (!seekToScanner(scanner, firstOnRow, firstKV)) break; - // If we find something, break; - if (walkForwardInSingleRow(scanner, firstOnRow, state)) break; - } - } - /* * Seek the file scanner to firstOnRow or first entry in file. * @param scanner @@ -1736,39 +1643,6 @@ public class Store extends SchemaConfigured implements HeapSize { return result >= 0; } - /* - * When we come in here, we are probably at the kv just before we break into - * the row that firstOnRow is on. 
Usually need to increment one time to get - * on to the row we are interested in. - * @param scanner - * @param firstOnRow - * @param state - * @return True we found a candidate. - * @throws IOException - */ - private boolean walkForwardInSingleRow(final HFileScanner scanner, - final KeyValue firstOnRow, - final GetClosestRowBeforeTracker state) - throws IOException { - boolean foundCandidate = false; - do { - KeyValue kv = scanner.getKeyValue(); - // If we are not in the row, skip. - if (this.comparator.compareRows(kv, firstOnRow) < 0) continue; - // Did we go beyond the target row? If so break. - if (state.isTooFar(kv, firstOnRow)) break; - if (state.isExpired(kv)) { - continue; - } - // If we added something, this row is a contender. break. - if (state.handle(kv)) { - foundCandidate = true; - break; - } - } while(scanner.next()); - return foundCandidate; - } - public boolean canSplit() { this.lock.readLock().lock(); try { diff --git src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java index 59a791d..67f2c2d 100644 --- src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java +++ src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java @@ -87,11 +87,7 @@ public class StorageClusterStatusResource extends ResourceBase { for (HServerLoad.RegionLoad region: load.getRegionsLoad().values()) { node.addRegion(region.getName(), region.getStores(), region.getStorefiles(), region.getStorefileSizeMB(), - region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB(), - region.getReadRequestsCount(), region.getWriteRequestsCount(), - region.getRootIndexSizeKB(), region.getTotalStaticIndexSizeKB(), - region.getTotalStaticBloomSizeKB(), region.getTotalCompactingKVs(), - region.getCurrentCompactedKVs()); + region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB()); } } for (ServerName name: status.getDeadServerNames()) { diff --git src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java index 56e31e1..2ecdce8 100644 --- src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java +++ src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java @@ -575,10 +575,6 @@ public class RemoteHTable implements HTableInterface { return true; } - public Result getRowOrBefore(byte[] row, byte[] family) throws IOException { - throw new IOException("getRowOrBefore not supported"); - } - public RowLock lockRow(byte[] row) throws IOException { throw new IOException("lockRow not implemented"); } diff --git src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java index 5072221..2ce6133 100644 --- src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java +++ src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java @@ -83,13 +83,6 @@ import com.google.protobuf.ByteString; * <attribute name="storefileSizeMB" type="int"></attribute> * <attribute name="memstoreSizeMB" type="int"></attribute> * <attribute name="storefileIndexSizeMB" type="int"></attribute> - * <attribute name="readRequestsCount" type="int"></attribute> - * <attribute name="writeRequestsCount" type="int"></attribute> - * <attribute name="rootIndexSizeKB" type="int"></attribute> - * <attribute name="totalStaticIndexSizeKB" type="int"></attribute> - * <attribute 
name="totalStaticBloomSizeKB" type="int"></attribute> - * <attribute name="totalCompactingKVs" type="int"></attribute> - * <attribute name="currentCompactedKVs" type="int"></attribute> * </complexType> * */ @@ -114,13 +107,6 @@ public class StorageClusterStatusModel private int storefileSizeMB; private int memstoreSizeMB; private int storefileIndexSizeMB; - private long readRequestsCount; - private long writeRequestsCount; - private int rootIndexSizeKB; - private int totalStaticIndexSizeKB; - private int totalStaticBloomSizeKB; - private long totalCompactingKVs; - private long currentCompactedKVs; /** * Default constructor @@ -145,23 +131,13 @@ public class StorageClusterStatusModel * @param storefileIndexSizeMB total size of store file indexes, in MB */ public Region(byte[] name, int stores, int storefiles, - int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB, - long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB, - int totalStaticIndexSizeKB, int totalStaticBloomSizeKB, - long totalCompactingKVs, long currentCompactedKVs) { + int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB) { this.name = name; this.stores = stores; this.storefiles = storefiles; this.storefileSizeMB = storefileSizeMB; this.memstoreSizeMB = memstoreSizeMB; this.storefileIndexSizeMB = storefileIndexSizeMB; - this.readRequestsCount = readRequestsCount; - this.writeRequestsCount = writeRequestsCount; - this.rootIndexSizeKB = rootIndexSizeKB; - this.totalStaticIndexSizeKB = totalStaticIndexSizeKB; - this.totalStaticBloomSizeKB = totalStaticBloomSizeKB; - this.totalCompactingKVs = totalCompactingKVs; - this.currentCompactedKVs = currentCompactedKVs; } /** @@ -211,118 +187,8 @@ public class StorageClusterStatusModel public int getStorefileIndexSizeMB() { return storefileIndexSizeMB; } - - /** - * @return the current total read requests made to region - */ - @XmlAttribute - public long getReadRequestsCount() { - return readRequestsCount; - } - - /** - * @return the current total write requests made to region - */ - @XmlAttribute - public long getWriteRequestsCount() { - return writeRequestsCount; - } - - /** - * @return The current total size of root-level indexes for the region, in KB. 
- */ - @XmlAttribute - public int getRootIndexSizeKB() { - return rootIndexSizeKB; - } - - /** - * @return The total size of static index, in KB - */ - @XmlAttribute - public int getTotalStaticIndexSizeKB() { - return totalStaticIndexSizeKB; - } - - /** - * @return The total size of static bloom, in KB - */ - @XmlAttribute - public int getTotalStaticBloomSizeKB() { - return totalStaticBloomSizeKB; - } - - /** - * @return The total number of compacting key-values - */ - @XmlAttribute - public long getTotalCompactingKVs() { - return totalCompactingKVs; - } /** - * @return The number of current compacted key-values - */ - @XmlAttribute - public long getCurrentCompactedKVs() { - return currentCompactedKVs; - } - - /** - * @param readRequestsCount The current total read requests made to region - */ - public void setReadRequestsCount(long readRequestsCount) { - this.readRequestsCount = readRequestsCount; - } - - /** - * @param rootIndexSizeKB The current total size of root-level indexes - * for the region, in KB - */ - public void setRootIndexSizeKB(int rootIndexSizeKB) { - this.rootIndexSizeKB = rootIndexSizeKB; - } - - /** - * @param writeRequestsCount The current total write requests made to region - */ - public void setWriteRequestsCount(long writeRequestsCount) { - this.writeRequestsCount = writeRequestsCount; - } - - /** - * @param currentCompactedKVs The completed count of key values - * in currently running compaction - */ - public void setCurrentCompactedKVs(long currentCompactedKVs) { - this.currentCompactedKVs = currentCompactedKVs; - } - - /** - * @param totalCompactingKVs The total compacting key values - * in currently running compaction - */ - public void setTotalCompactingKVs(long totalCompactingKVs) { - this.totalCompactingKVs = totalCompactingKVs; - } - - /** - * @param totalStaticBloomSizeKB The total size of all Bloom filter blocks, - * not just loaded into the block cache, in KB. - */ - public void setTotalStaticBloomSizeKB(int totalStaticBloomSizeKB) { - this.totalStaticBloomSizeKB = totalStaticBloomSizeKB; - } - - /** - * @param totalStaticIndexSizeKB The total size of all index blocks, - * not just the root level, in KB. 
- */ - public void setTotalStaticIndexSizeKB(int totalStaticIndexSizeKB) { - this.totalStaticIndexSizeKB = totalStaticIndexSizeKB; - } - - /** * @param name the region name */ public void setName(byte[] name) { @@ -377,14 +243,9 @@ public class StorageClusterStatusModel * @param name the region name */ public void addRegion(byte[] name, int stores, int storefiles, - int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB, - long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB, - int totalStaticIndexSizeKB, int totalStaticBloomSizeKB, - long totalCompactingKVs, long currentCompactedKVs) { + int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB) { regions.add(new Region(name, stores, storefiles, storefileSizeMB, - memstoreSizeMB, storefileIndexSizeMB, readRequestsCount, - writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB, - totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs)); + memstoreSizeMB, storefileIndexSizeMB)); } /** @@ -671,20 +532,6 @@ public class StorageClusterStatusModel sb.append(region.memstoreSizeMB); sb.append("\n storefileIndexSizeMB="); sb.append(region.storefileIndexSizeMB); - sb.append("\n readRequestsCount="); - sb.append(region.readRequestsCount); - sb.append("\n writeRequestsCount="); - sb.append(region.writeRequestsCount); - sb.append("\n rootIndexSizeKB="); - sb.append(region.rootIndexSizeKB); - sb.append("\n totalStaticIndexSizeKB="); - sb.append(region.totalStaticIndexSizeKB); - sb.append("\n totalStaticBloomSizeKB="); - sb.append(region.totalStaticBloomSizeKB); - sb.append("\n totalCompactingKVs="); - sb.append(region.totalCompactingKVs); - sb.append("\n currentCompactedKVs="); - sb.append(region.currentCompactedKVs); sb.append('\n'); } sb.append('\n'); @@ -702,7 +549,7 @@ public class StorageClusterStatusModel } return sb.toString(); } - + @Override public byte[] createProtobufOutput() { StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder(); @@ -726,13 +573,6 @@ public class StorageClusterStatusModel regionBuilder.setStorefileSizeMB(region.storefileSizeMB); regionBuilder.setMemstoreSizeMB(region.memstoreSizeMB); regionBuilder.setStorefileIndexSizeMB(region.storefileIndexSizeMB); - regionBuilder.setReadRequestsCount(region.readRequestsCount); - regionBuilder.setWriteRequestsCount(region.writeRequestsCount); - regionBuilder.setRootIndexSizeKB(region.rootIndexSizeKB); - regionBuilder.setTotalStaticIndexSizeKB(region.totalStaticIndexSizeKB); - regionBuilder.setTotalStaticBloomSizeKB(region.totalStaticBloomSizeKB); - regionBuilder.setTotalCompactingKVs(region.totalCompactingKVs); - regionBuilder.setCurrentCompactedKVs(region.currentCompactedKVs); nodeBuilder.addRegions(regionBuilder); } builder.addLiveNodes(nodeBuilder); @@ -771,14 +611,7 @@ public class StorageClusterStatusModel region.getStorefiles(), region.getStorefileSizeMB(), region.getMemstoreSizeMB(), - region.getStorefileIndexSizeMB(), - region.getReadRequestsCount(), - region.getWriteRequestsCount(), - region.getRootIndexSizeKB(), - region.getTotalStaticIndexSizeKB(), - region.getTotalStaticBloomSizeKB(), - region.getTotalCompactingKVs(), - region.getCurrentCompactedKVs()); + region.getStorefileIndexSizeMB()); } } for (String node: builder.getDeadNodesList()) { diff --git src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java index 3535595..9e16272 100644 --- src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java +++ 
src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java @@ -97,9 +97,11 @@ public class TableRegionModel implements Serializable { public String getName() { byte [] tableNameAsBytes = Bytes.toBytes(this.table); byte [] nameAsBytes = HRegionInfo.createRegionName(tableNameAsBytes, - this.startKey, this.id, - !HTableDescriptor.isMetaTable(tableNameAsBytes)); - return Bytes.toString(nameAsBytes); + this.startKey, + this.endKey, + Long.toString(this.id).getBytes(), + !HTableDescriptor.isMetaTable(tableNameAsBytes)); + return Bytes.toStringBinary(nameAsBytes); } /** @@ -111,6 +113,14 @@ public class TableRegionModel implements Serializable { } /** + * @return the table name + */ + @XmlAttribute + public String getTable() { + return table; + } + + /** * @return the start key */ @XmlAttribute @@ -135,15 +145,10 @@ public class TableRegionModel implements Serializable { } /** - * @param name region printable name + * @param table the table name */ - public void setName(String name) { - String split[] = name.split(","); - this.table = split[0]; - this.startKey = Bytes.toBytes(split[1]); - String tail = split[2]; - split = tail.split("\\."); - id = Long.valueOf(split[0]); + public void setTable(String table) { + this.table = table; } /** @@ -187,6 +192,8 @@ public class TableRegionModel implements Serializable { sb.append(Bytes.toString(startKey)); sb.append("'\n endKey='"); sb.append(Bytes.toString(endKey)); + sb.append("'\n table='"); + sb.append(table); if (location != null) { sb.append("'\n location='"); sb.append(location); diff --git src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java index a6023b9..b20d6d4 100644 --- src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java +++ src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java @@ -92,34 +92,6 @@ public final class StorageClusterStatusMessage { // optional int32 storefileIndexSizeMB = 6; boolean hasStorefileIndexSizeMB(); int getStorefileIndexSizeMB(); - - // optional int64 readRequestsCount = 7; - boolean hasReadRequestsCount(); - long getReadRequestsCount(); - - // optional int64 writeRequestsCount = 8; - boolean hasWriteRequestsCount(); - long getWriteRequestsCount(); - - // optional int32 rootIndexSizeKB = 9; - boolean hasRootIndexSizeKB(); - int getRootIndexSizeKB(); - - // optional int32 totalStaticIndexSizeKB = 10; - boolean hasTotalStaticIndexSizeKB(); - int getTotalStaticIndexSizeKB(); - - // optional int32 totalStaticBloomSizeKB = 11; - boolean hasTotalStaticBloomSizeKB(); - int getTotalStaticBloomSizeKB(); - - // optional int64 totalCompactingKVs = 12; - boolean hasTotalCompactingKVs(); - long getTotalCompactingKVs(); - - // optional int64 currentCompactedKVs = 13; - boolean hasCurrentCompactedKVs(); - long getCurrentCompactedKVs(); } public static final class Region extends com.google.protobuf.GeneratedMessage @@ -210,76 +182,6 @@ public final class StorageClusterStatusMessage { return storefileIndexSizeMB_; } - // optional int64 readRequestsCount = 7; - public static final int READREQUESTSCOUNT_FIELD_NUMBER = 7; - private long readRequestsCount_; - public boolean hasReadRequestsCount() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - public long getReadRequestsCount() { - return readRequestsCount_; - } - - // optional int64 writeRequestsCount = 8; - public static final int 
WRITEREQUESTSCOUNT_FIELD_NUMBER = 8; - private long writeRequestsCount_; - public boolean hasWriteRequestsCount() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - public long getWriteRequestsCount() { - return writeRequestsCount_; - } - - // optional int32 rootIndexSizeKB = 9; - public static final int ROOTINDEXSIZEKB_FIELD_NUMBER = 9; - private int rootIndexSizeKB_; - public boolean hasRootIndexSizeKB() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - public int getRootIndexSizeKB() { - return rootIndexSizeKB_; - } - - // optional int32 totalStaticIndexSizeKB = 10; - public static final int TOTALSTATICINDEXSIZEKB_FIELD_NUMBER = 10; - private int totalStaticIndexSizeKB_; - public boolean hasTotalStaticIndexSizeKB() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - public int getTotalStaticIndexSizeKB() { - return totalStaticIndexSizeKB_; - } - - // optional int32 totalStaticBloomSizeKB = 11; - public static final int TOTALSTATICBLOOMSIZEKB_FIELD_NUMBER = 11; - private int totalStaticBloomSizeKB_; - public boolean hasTotalStaticBloomSizeKB() { - return ((bitField0_ & 0x00000400) == 0x00000400); - } - public int getTotalStaticBloomSizeKB() { - return totalStaticBloomSizeKB_; - } - - // optional int64 totalCompactingKVs = 12; - public static final int TOTALCOMPACTINGKVS_FIELD_NUMBER = 12; - private long totalCompactingKVs_; - public boolean hasTotalCompactingKVs() { - return ((bitField0_ & 0x00000800) == 0x00000800); - } - public long getTotalCompactingKVs() { - return totalCompactingKVs_; - } - - // optional int64 currentCompactedKVs = 13; - public static final int CURRENTCOMPACTEDKVS_FIELD_NUMBER = 13; - private long currentCompactedKVs_; - public boolean hasCurrentCompactedKVs() { - return ((bitField0_ & 0x00001000) == 0x00001000); - } - public long getCurrentCompactedKVs() { - return currentCompactedKVs_; - } - private void initFields() { name_ = com.google.protobuf.ByteString.EMPTY; stores_ = 0; @@ -287,13 +189,6 @@ public final class StorageClusterStatusMessage { storefileSizeMB_ = 0; memstoreSizeMB_ = 0; storefileIndexSizeMB_ = 0; - readRequestsCount_ = 0L; - writeRequestsCount_ = 0L; - rootIndexSizeKB_ = 0; - totalStaticIndexSizeKB_ = 0; - totalStaticBloomSizeKB_ = 0; - totalCompactingKVs_ = 0L; - currentCompactedKVs_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -329,27 +224,6 @@ public final class StorageClusterStatusMessage { if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeInt32(6, storefileIndexSizeMB_); } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeInt64(7, readRequestsCount_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - output.writeInt64(8, writeRequestsCount_); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - output.writeInt32(9, rootIndexSizeKB_); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - output.writeInt32(10, totalStaticIndexSizeKB_); - } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - output.writeInt32(11, totalStaticBloomSizeKB_); - } - if (((bitField0_ & 0x00000800) == 0x00000800)) { - output.writeInt64(12, totalCompactingKVs_); - } - if (((bitField0_ & 0x00001000) == 0x00001000)) { - output.writeInt64(13, currentCompactedKVs_); - } getUnknownFields().writeTo(output); } @@ -383,34 +257,6 @@ public final class StorageClusterStatusMessage { size += com.google.protobuf.CodedOutputStream .computeInt32Size(6, storefileIndexSizeMB_); } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += 
com.google.protobuf.CodedOutputStream - .computeInt64Size(7, readRequestsCount_); - } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(8, writeRequestsCount_); - } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(9, rootIndexSizeKB_); - } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(10, totalStaticIndexSizeKB_); - } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - size += com.google.protobuf.CodedOutputStream - .computeInt32Size(11, totalStaticBloomSizeKB_); - } - if (((bitField0_ & 0x00000800) == 0x00000800)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(12, totalCompactingKVs_); - } - if (((bitField0_ & 0x00001000) == 0x00001000)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(13, currentCompactedKVs_); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -547,20 +393,6 @@ public final class StorageClusterStatusMessage { bitField0_ = (bitField0_ & ~0x00000010); storefileIndexSizeMB_ = 0; bitField0_ = (bitField0_ & ~0x00000020); - readRequestsCount_ = 0L; - bitField0_ = (bitField0_ & ~0x00000040); - writeRequestsCount_ = 0L; - bitField0_ = (bitField0_ & ~0x00000080); - rootIndexSizeKB_ = 0; - bitField0_ = (bitField0_ & ~0x00000100); - totalStaticIndexSizeKB_ = 0; - bitField0_ = (bitField0_ & ~0x00000200); - totalStaticBloomSizeKB_ = 0; - bitField0_ = (bitField0_ & ~0x00000400); - totalCompactingKVs_ = 0L; - bitField0_ = (bitField0_ & ~0x00000800); - currentCompactedKVs_ = 0L; - bitField0_ = (bitField0_ & ~0x00001000); return this; } @@ -623,34 +455,6 @@ public final class StorageClusterStatusMessage { to_bitField0_ |= 0x00000020; } result.storefileIndexSizeMB_ = storefileIndexSizeMB_; - if (((from_bitField0_ & 0x00000040) == 0x00000040)) { - to_bitField0_ |= 0x00000040; - } - result.readRequestsCount_ = readRequestsCount_; - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000080; - } - result.writeRequestsCount_ = writeRequestsCount_; - if (((from_bitField0_ & 0x00000100) == 0x00000100)) { - to_bitField0_ |= 0x00000100; - } - result.rootIndexSizeKB_ = rootIndexSizeKB_; - if (((from_bitField0_ & 0x00000200) == 0x00000200)) { - to_bitField0_ |= 0x00000200; - } - result.totalStaticIndexSizeKB_ = totalStaticIndexSizeKB_; - if (((from_bitField0_ & 0x00000400) == 0x00000400)) { - to_bitField0_ |= 0x00000400; - } - result.totalStaticBloomSizeKB_ = totalStaticBloomSizeKB_; - if (((from_bitField0_ & 0x00000800) == 0x00000800)) { - to_bitField0_ |= 0x00000800; - } - result.totalCompactingKVs_ = totalCompactingKVs_; - if (((from_bitField0_ & 0x00001000) == 0x00001000)) { - to_bitField0_ |= 0x00001000; - } - result.currentCompactedKVs_ = currentCompactedKVs_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -685,27 +489,6 @@ public final class StorageClusterStatusMessage { if (other.hasStorefileIndexSizeMB()) { setStorefileIndexSizeMB(other.getStorefileIndexSizeMB()); } - if (other.hasReadRequestsCount()) { - setReadRequestsCount(other.getReadRequestsCount()); - } - if (other.hasWriteRequestsCount()) { - setWriteRequestsCount(other.getWriteRequestsCount()); - } - if (other.hasRootIndexSizeKB()) { - setRootIndexSizeKB(other.getRootIndexSizeKB()); - } - if (other.hasTotalStaticIndexSizeKB()) { - 
setTotalStaticIndexSizeKB(other.getTotalStaticIndexSizeKB()); - } - if (other.hasTotalStaticBloomSizeKB()) { - setTotalStaticBloomSizeKB(other.getTotalStaticBloomSizeKB()); - } - if (other.hasTotalCompactingKVs()) { - setTotalCompactingKVs(other.getTotalCompactingKVs()); - } - if (other.hasCurrentCompactedKVs()) { - setCurrentCompactedKVs(other.getCurrentCompactedKVs()); - } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -771,41 +554,6 @@ public final class StorageClusterStatusMessage { storefileIndexSizeMB_ = input.readInt32(); break; } - case 56: { - bitField0_ |= 0x00000040; - readRequestsCount_ = input.readInt64(); - break; - } - case 64: { - bitField0_ |= 0x00000080; - writeRequestsCount_ = input.readInt64(); - break; - } - case 72: { - bitField0_ |= 0x00000100; - rootIndexSizeKB_ = input.readInt32(); - break; - } - case 80: { - bitField0_ |= 0x00000200; - totalStaticIndexSizeKB_ = input.readInt32(); - break; - } - case 88: { - bitField0_ |= 0x00000400; - totalStaticBloomSizeKB_ = input.readInt32(); - break; - } - case 96: { - bitField0_ |= 0x00000800; - totalCompactingKVs_ = input.readInt64(); - break; - } - case 104: { - bitField0_ |= 0x00001000; - currentCompactedKVs_ = input.readInt64(); - break; - } } } } @@ -941,153 +689,6 @@ public final class StorageClusterStatusMessage { return this; } - // optional int64 readRequestsCount = 7; - private long readRequestsCount_ ; - public boolean hasReadRequestsCount() { - return ((bitField0_ & 0x00000040) == 0x00000040); - } - public long getReadRequestsCount() { - return readRequestsCount_; - } - public Builder setReadRequestsCount(long value) { - bitField0_ |= 0x00000040; - readRequestsCount_ = value; - onChanged(); - return this; - } - public Builder clearReadRequestsCount() { - bitField0_ = (bitField0_ & ~0x00000040); - readRequestsCount_ = 0L; - onChanged(); - return this; - } - - // optional int64 writeRequestsCount = 8; - private long writeRequestsCount_ ; - public boolean hasWriteRequestsCount() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - public long getWriteRequestsCount() { - return writeRequestsCount_; - } - public Builder setWriteRequestsCount(long value) { - bitField0_ |= 0x00000080; - writeRequestsCount_ = value; - onChanged(); - return this; - } - public Builder clearWriteRequestsCount() { - bitField0_ = (bitField0_ & ~0x00000080); - writeRequestsCount_ = 0L; - onChanged(); - return this; - } - - // optional int32 rootIndexSizeKB = 9; - private int rootIndexSizeKB_ ; - public boolean hasRootIndexSizeKB() { - return ((bitField0_ & 0x00000100) == 0x00000100); - } - public int getRootIndexSizeKB() { - return rootIndexSizeKB_; - } - public Builder setRootIndexSizeKB(int value) { - bitField0_ |= 0x00000100; - rootIndexSizeKB_ = value; - onChanged(); - return this; - } - public Builder clearRootIndexSizeKB() { - bitField0_ = (bitField0_ & ~0x00000100); - rootIndexSizeKB_ = 0; - onChanged(); - return this; - } - - // optional int32 totalStaticIndexSizeKB = 10; - private int totalStaticIndexSizeKB_ ; - public boolean hasTotalStaticIndexSizeKB() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } - public int getTotalStaticIndexSizeKB() { - return totalStaticIndexSizeKB_; - } - public Builder setTotalStaticIndexSizeKB(int value) { - bitField0_ |= 0x00000200; - totalStaticIndexSizeKB_ = value; - onChanged(); - return this; - } - public Builder clearTotalStaticIndexSizeKB() { - bitField0_ = (bitField0_ & ~0x00000200); - totalStaticIndexSizeKB_ = 0; - onChanged(); - return this; - } - - // 
optional int32 totalStaticBloomSizeKB = 11; - private int totalStaticBloomSizeKB_ ; - public boolean hasTotalStaticBloomSizeKB() { - return ((bitField0_ & 0x00000400) == 0x00000400); - } - public int getTotalStaticBloomSizeKB() { - return totalStaticBloomSizeKB_; - } - public Builder setTotalStaticBloomSizeKB(int value) { - bitField0_ |= 0x00000400; - totalStaticBloomSizeKB_ = value; - onChanged(); - return this; - } - public Builder clearTotalStaticBloomSizeKB() { - bitField0_ = (bitField0_ & ~0x00000400); - totalStaticBloomSizeKB_ = 0; - onChanged(); - return this; - } - - // optional int64 totalCompactingKVs = 12; - private long totalCompactingKVs_ ; - public boolean hasTotalCompactingKVs() { - return ((bitField0_ & 0x00000800) == 0x00000800); - } - public long getTotalCompactingKVs() { - return totalCompactingKVs_; - } - public Builder setTotalCompactingKVs(long value) { - bitField0_ |= 0x00000800; - totalCompactingKVs_ = value; - onChanged(); - return this; - } - public Builder clearTotalCompactingKVs() { - bitField0_ = (bitField0_ & ~0x00000800); - totalCompactingKVs_ = 0L; - onChanged(); - return this; - } - - // optional int64 currentCompactedKVs = 13; - private long currentCompactedKVs_ ; - public boolean hasCurrentCompactedKVs() { - return ((bitField0_ & 0x00001000) == 0x00001000); - } - public long getCurrentCompactedKVs() { - return currentCompactedKVs_; - } - public Builder setCurrentCompactedKVs(long value) { - bitField0_ |= 0x00001000; - currentCompactedKVs_ = value; - onChanged(); - return this; - } - public Builder clearCurrentCompactedKVs() { - bitField0_ = (bitField0_ & ~0x00001000); - currentCompactedKVs_ = 0L; - onChanged(); - return this; - } - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region) } @@ -2811,25 +2412,20 @@ public final class StorageClusterStatusMessage { java.lang.String[] descriptorData = { "\n!StorageClusterStatusMessage.proto\022/org" + ".apache.hadoop.hbase.rest.protobuf.gener" + - "ated\"\333\005\n\024StorageClusterStatus\022]\n\tliveNod" + + "ated\"\222\004\n\024StorageClusterStatus\022]\n\tliveNod" + "es\030\001 \003(\0132J.org.apache.hadoop.hbase.rest." + "protobuf.generated.StorageClusterStatus." 
+ "Node\022\021\n\tdeadNodes\030\002 \003(\t\022\017\n\007regions\030\003 \001(\005" + "\022\020\n\010requests\030\004 \001(\005\022\023\n\013averageLoad\030\005 \001(\001\032" + - "\322\002\n\006Region\022\014\n\004name\030\001 \002(\014\022\016\n\006stores\030\002 \001(\005" + + "\211\001\n\006Region\022\014\n\004name\030\001 \002(\014\022\016\n\006stores\030\002 \001(\005" + "\022\022\n\nstorefiles\030\003 \001(\005\022\027\n\017storefileSizeMB\030" + "\004 \001(\005\022\026\n\016memstoreSizeMB\030\005 \001(\005\022\034\n\024storefi", - "leIndexSizeMB\030\006 \001(\005\022\031\n\021readRequestsCount" + - "\030\007 \001(\003\022\032\n\022writeRequestsCount\030\010 \001(\003\022\027\n\017ro" + - "otIndexSizeKB\030\t \001(\005\022\036\n\026totalStaticIndexS" + - "izeKB\030\n \001(\005\022\036\n\026totalStaticBloomSizeKB\030\013 " + - "\001(\005\022\032\n\022totalCompactingKVs\030\014 \001(\003\022\033\n\023curre" + - "ntCompactedKVs\030\r \001(\003\032\303\001\n\004Node\022\014\n\004name\030\001 " + - "\002(\t\022\021\n\tstartCode\030\002 \001(\003\022\020\n\010requests\030\003 \001(\005" + - "\022\022\n\nheapSizeMB\030\004 \001(\005\022\025\n\rmaxHeapSizeMB\030\005 " + - "\001(\005\022]\n\007regions\030\006 \003(\0132L.org.apache.hadoop" + - ".hbase.rest.protobuf.generated.StorageCl", - "usterStatus.Region" + "leIndexSizeMB\030\006 \001(\005\032\303\001\n\004Node\022\014\n\004name\030\001 \002" + + "(\t\022\021\n\tstartCode\030\002 \001(\003\022\020\n\010requests\030\003 \001(\005\022" + + "\022\n\nheapSizeMB\030\004 \001(\005\022\025\n\rmaxHeapSizeMB\030\005 \001" + + "(\005\022]\n\007regions\030\006 \003(\0132L.org.apache.hadoop." + + "hbase.rest.protobuf.generated.StorageClu" + + "sterStatus.Region" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -2849,7 +2445,7 @@ public final class StorageClusterStatusMessage { internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor, - new java.lang.String[] { "Name", "Stores", "Storefiles", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "TotalCompactingKVs", "CurrentCompactedKVs", }, + new java.lang.String[] { "Name", "Stores", "Storefiles", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", }, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.class, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder.class); internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor = diff --git src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java index 60eb426..e045a6c 100644 --- src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java +++ src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java @@ -1130,7 +1130,7 @@ public class ThriftServerRunner implements Runnable { @Override public List scannerGet(int id) throws IllegalArgument, IOError { - return scannerGetList(id,1); + return scannerGetList(id, 1); } public int 
scannerOpenWithScan(ByteBuffer tableName, TScan tScan, @@ -1331,26 +1331,18 @@ public class ThriftServerRunner implements Runnable { } @Override - public List getRowOrBefore(ByteBuffer tableName, ByteBuffer row, - ByteBuffer family) throws IOError { - try { - HTable table = getTable(getBytes(tableName)); - Result result = table.getRowOrBefore(getBytes(row), getBytes(family)); - return ThriftUtilities.cellFromHBase(result.raw()); - } catch (IOException e) { - LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); - } - } - - @Override public TRegionInfo getRegionInfo(ByteBuffer searchRow) throws IOError { try { HTable table = getTable(HConstants.META_TABLE_NAME); byte[] row = getBytes(searchRow); - Result startRowResult = table.getRowOrBefore( - row, HConstants.CATALOG_FAMILY); + Scan scan = new Scan(); + scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + byte[] b = new byte[searchRow.remaining()]; + searchRow.get(b, 0, b.length); + scan.setStartRow(b); + ResultScanner scanner = table.getScanner(scan); + Result startRowResult = scanner.next(); if (startRowResult == null) { throw new IOException("Cannot find row in .META., row=" + Bytes.toStringBinary(row)); diff --git src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java index a5b81f5..abeac63 100644 --- src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java +++ src/main/java/org/apache/hadoop/hbase/thrift/generated/AlreadyExists.java @@ -6,6 +6,7 @@ */ package org.apache.hadoop.hbase.thrift.generated; +import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.thrift.scheme.IScheme; import org.apache.thrift.scheme.SchemeFactory; import org.apache.thrift.scheme.StandardScheme; @@ -226,7 +227,14 @@ public class AlreadyExists extends Exception implements org.apache.thrift.TBase< @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_message = true && (isSetMessage()); + builder.append(present_message); + if (present_message) + builder.append(message); + + return builder.toHashCode(); } public int compareTo(AlreadyExists other) { diff --git src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java index d5df940..d535baf 100644 --- src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java +++ src/main/java/org/apache/hadoop/hbase/thrift/generated/BatchMutation.java @@ -6,6 +6,7 @@ */ package org.apache.hadoop.hbase.thrift.generated; +import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.thrift.scheme.IScheme; import org.apache.thrift.scheme.SchemeFactory; import org.apache.thrift.scheme.StandardScheme; @@ -314,7 +315,19 @@ public class BatchMutation implements org.apache.thrift.TBase(_list0.size); for (int _i1 = 0; _i1 < _list0.size; ++_i1) { - Mutation _elem2; // optional + Mutation _elem2; // required _elem2 = new Mutation(); _elem2.read(iprot); struct.mutations.add(_elem2); @@ -534,7 +547,7 @@ public class BatchMutation implements org.apache.thrift.TBase(_list5.size); for (int _i6 = 0; _i6 < _list5.size; ++_i6) { - Mutation _elem7; // optional + Mutation _elem7; // required _elem7 = new Mutation(); _elem7.read(iprot); struct.mutations.add(_elem7); diff --git src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java 
src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java index 4ce85e7..047de4a 100644 --- src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java +++ src/main/java/org/apache/hadoop/hbase/thrift/generated/ColumnDescriptor.java @@ -6,6 +6,7 @@ */ package org.apache.hadoop.hbase.thrift.generated; +import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.thrift.scheme.IScheme; import org.apache.thrift.scheme.SchemeFactory; import org.apache.thrift.scheme.StandardScheme; @@ -730,7 +731,54 @@ public class ColumnDescriptor implements org.apache.thrift.TBase mutations, Map attributes) throws IOError, IllegalArgument, org.apache.thrift.TException; @@ -319,7 +320,7 @@ public class Hbase { * * @param timestamp timestamp * - * @param attributes Put attributes + * @param attributes Mutation attributes */ public void mutateRowTs(ByteBuffer tableName, ByteBuffer row, List mutations, long timestamp, Map attributes) throws IOError, IllegalArgument, org.apache.thrift.TException; @@ -333,7 +334,7 @@ public class Hbase { * * @param rowBatches list of row batches * - * @param attributes Put attributes + * @param attributes Mutation attributes */ public void mutateRows(ByteBuffer tableName, List rowBatches, Map attributes) throws IOError, IllegalArgument, org.apache.thrift.TException; @@ -349,7 +350,7 @@ public class Hbase { * * @param timestamp timestamp * - * @param attributes Put attributes + * @param attributes Mutation attributes */ public void mutateRowsTs(ByteBuffer tableName, List rowBatches, long timestamp, Map attributes) throws IOError, IllegalArgument, org.apache.thrift.TException; @@ -582,19 +583,6 @@ public class Hbase { public void scannerClose(int id) throws IOError, IllegalArgument, org.apache.thrift.TException; /** - * Get the row just before the specified one. - * - * @return value for specified row/column - * - * @param tableName name of table - * - * @param row row key - * - * @param family column name - */ - public List getRowOrBefore(ByteBuffer tableName, ByteBuffer row, ByteBuffer family) throws IOError, org.apache.thrift.TException; - - /** * Get the regininfo for the specified row. It scans * the metatable to find region's start and end keys. 
* @@ -686,8 +674,6 @@ public class Hbase { public void scannerClose(int id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void getRowOrBefore(ByteBuffer tableName, ByteBuffer row, ByteBuffer family, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - public void getRegionInfo(ByteBuffer row, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; } @@ -1796,34 +1782,6 @@ public class Hbase { return; } - public List getRowOrBefore(ByteBuffer tableName, ByteBuffer row, ByteBuffer family) throws IOError, org.apache.thrift.TException - { - send_getRowOrBefore(tableName, row, family); - return recv_getRowOrBefore(); - } - - public void send_getRowOrBefore(ByteBuffer tableName, ByteBuffer row, ByteBuffer family) throws org.apache.thrift.TException - { - getRowOrBefore_args args = new getRowOrBefore_args(); - args.setTableName(tableName); - args.setRow(row); - args.setFamily(family); - sendBase("getRowOrBefore", args); - } - - public List recv_getRowOrBefore() throws IOError, org.apache.thrift.TException - { - getRowOrBefore_result result = new getRowOrBefore_result(); - receiveBase(result, "getRowOrBefore"); - if (result.isSetSuccess()) { - return result.success; - } - if (result.io != null) { - throw result.io; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getRowOrBefore failed: unknown result"); - } - public TRegionInfo getRegionInfo(ByteBuffer row) throws IOError, org.apache.thrift.TException { send_getRegionInfo(row); @@ -3371,44 +3329,6 @@ public class Hbase { } } - public void getRowOrBefore(ByteBuffer tableName, ByteBuffer row, ByteBuffer family, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - getRowOrBefore_call method_call = new getRowOrBefore_call(tableName, row, family, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class getRowOrBefore_call extends org.apache.thrift.async.TAsyncMethodCall { - private ByteBuffer tableName; - private ByteBuffer row; - private ByteBuffer family; - public getRowOrBefore_call(ByteBuffer tableName, ByteBuffer row, ByteBuffer family, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.tableName = tableName; - this.row = row; - this.family = family; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getRowOrBefore", org.apache.thrift.protocol.TMessageType.CALL, 0)); - getRowOrBefore_args args = new getRowOrBefore_args(); - args.setTableName(tableName); - args.setRow(row); - args.setFamily(family); - args.write(prot); - prot.writeMessageEnd(); - } - - public List getResult() throws IOError, org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new 
org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_getRowOrBefore(); - } - } - public void getRegionInfo(ByteBuffer row, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); getRegionInfo_call method_call = new getRegionInfo_call(row, resultHandler, this, ___protocolFactory, ___transport); @@ -3493,7 +3413,6 @@ public class Hbase { processMap.put("scannerGet", new scannerGet()); processMap.put("scannerGetList", new scannerGetList()); processMap.put("scannerClose", new scannerClose()); - processMap.put("getRowOrBefore", new getRowOrBefore()); processMap.put("getRegionInfo", new getRegionInfo()); return processMap; } @@ -4306,26 +4225,6 @@ public class Hbase { } } - private static class getRowOrBefore extends org.apache.thrift.ProcessFunction { - public getRowOrBefore() { - super("getRowOrBefore"); - } - - protected getRowOrBefore_args getEmptyArgsInstance() { - return new getRowOrBefore_args(); - } - - protected getRowOrBefore_result getResult(I iface, getRowOrBefore_args args) throws org.apache.thrift.TException { - getRowOrBefore_result result = new getRowOrBefore_result(); - try { - result.success = iface.getRowOrBefore(args.tableName, args.row, args.family); - } catch (IOError io) { - result.io = io; - } - return result; - } - } - private static class getRegionInfo extends org.apache.thrift.ProcessFunction { public getRegionInfo() { super("getRegionInfo"); @@ -4565,7 +4464,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + return builder.toHashCode(); } public int compareTo(enableTable_args other) { @@ -4918,7 +4824,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(enableTable_result other) { @@ -5295,7 +5208,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + return builder.toHashCode(); } public int compareTo(disableTable_args other) { @@ -5648,7 +5568,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(disableTable_result other) { @@ -6025,7 +5952,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + return builder.toHashCode(); } public int compareTo(isTableEnabled_args other) { @@ -6440,7 +6374,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + 
+ boolean present_success = true; + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(isTableEnabled_result other) { @@ -6520,6 +6466,8 @@ public class Hbase { private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bit_vector = new BitSet(1); read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -6840,7 +6788,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableNameOrRegionName = true && (isSetTableNameOrRegionName()); + builder.append(present_tableNameOrRegionName); + if (present_tableNameOrRegionName) + builder.append(tableNameOrRegionName); + + return builder.toHashCode(); } public int compareTo(compact_args other) { @@ -7193,7 +7148,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(compact_result other) { @@ -7558,7 +7520,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableNameOrRegionName = true && (isSetTableNameOrRegionName()); + builder.append(present_tableNameOrRegionName); + if (present_tableNameOrRegionName) + builder.append(tableNameOrRegionName); + + return builder.toHashCode(); } public int compareTo(majorCompact_args other) { @@ -7911,7 +7880,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(majorCompact_result other) { @@ -8201,7 +8177,9 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + return builder.toHashCode(); } public int compareTo(getTableNames_args other) { @@ -8590,7 +8568,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(getTableNames_result other) { @@ -8705,7 +8695,7 @@ public class Hbase { struct.success = new ArrayList(_list26.size); for (int _i27 = 0; _i27 < _list26.size; ++_i27) { - ByteBuffer _elem28; // optional + ByteBuffer _elem28; // required _elem28 = iprot.readBinary(); struct.success.add(_elem28); } @@ -8806,7 +8796,7 @@ public class Hbase { struct.success = new ArrayList(_list31.size); for (int _i32 = 0; _i32 < _list31.size; ++_i32) { - ByteBuffer _elem33; // optional + ByteBuffer _elem33; // 
required _elem33 = iprot.readBinary(); struct.success.add(_elem33); } @@ -9040,7 +9030,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + return builder.toHashCode(); } public int compareTo(getColumnDescriptors_args other) { @@ -9477,7 +9474,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(getColumnDescriptors_result other) { @@ -9593,7 +9602,7 @@ public class Hbase { for (int _i35 = 0; _i35 < _map34.size; ++_i35) { ByteBuffer _key36; // required - ColumnDescriptor _val37; // required + ColumnDescriptor _val37; // optional _key36 = iprot.readBinary(); _val37 = new ColumnDescriptor(); _val37.read(iprot); @@ -9699,7 +9708,7 @@ public class Hbase { for (int _i41 = 0; _i41 < _map40.size; ++_i41) { ByteBuffer _key42; // required - ColumnDescriptor _val43; // required + ColumnDescriptor _val43; // optional _key42 = iprot.readBinary(); _val43 = new ColumnDescriptor(); _val43.read(iprot); @@ -9935,7 +9944,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + return builder.toHashCode(); } public int compareTo(getTableRegions_args other) { @@ -10367,7 +10383,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(getTableRegions_result other) { @@ -10482,7 +10510,7 @@ public class Hbase { struct.success = new ArrayList(_list44.size); for (int _i45 = 0; _i45 < _list44.size; ++_i45) { - TRegionInfo _elem46; // optional + TRegionInfo _elem46; // required _elem46 = new TRegionInfo(); _elem46.read(iprot); struct.success.add(_elem46); @@ -10584,7 +10612,7 @@ public class Hbase { struct.success = new ArrayList(_list49.size); for (int _i50 = 0; _i50 < _list49.size; ++_i50) { - TRegionInfo _elem51; // optional + TRegionInfo _elem51; // required _elem51 = new TRegionInfo(); _elem51.read(iprot); struct.success.add(_elem51); @@ -10910,7 +10938,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_columnFamilies = true && (isSetColumnFamilies()); + builder.append(present_columnFamilies); + if (present_columnFamilies) + builder.append(columnFamilies); + + return builder.toHashCode(); } public int compareTo(createTable_args other) { @@ -11033,7 +11073,7 @@ public class Hbase { 
struct.columnFamilies = new ArrayList(_list52.size); for (int _i53 = 0; _i53 < _list52.size; ++_i53) { - ColumnDescriptor _elem54; // optional + ColumnDescriptor _elem54; // required _elem54 = new ColumnDescriptor(); _elem54.read(iprot); struct.columnFamilies.add(_elem54); @@ -11130,7 +11170,7 @@ public class Hbase { struct.columnFamilies = new ArrayList(_list57.size); for (int _i58 = 0; _i58 < _list57.size; ++_i58) { - ColumnDescriptor _elem59; // optional + ColumnDescriptor _elem59; // required _elem59 = new ColumnDescriptor(); _elem59.read(iprot); struct.columnFamilies.add(_elem59); @@ -11456,7 +11496,24 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + boolean present_ia = true && (isSetIa()); + builder.append(present_ia); + if (present_ia) + builder.append(ia); + + boolean present_exist = true && (isSetExist()); + builder.append(present_exist); + if (present_exist) + builder.append(exist); + + return builder.toHashCode(); } public int compareTo(createTable_result other) { @@ -11919,7 +11976,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + return builder.toHashCode(); } public int compareTo(deleteTable_args other) { @@ -12272,7 +12336,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(deleteTable_result other) { @@ -12907,7 +12978,29 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + boolean present_column = true && (isSetColumn()); + builder.append(present_column); + if (present_column) + builder.append(column); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(get_args other) { @@ -13083,7 +13176,7 @@ public class Hbase { for (int _i61 = 0; _i61 < _map60.size; ++_i61) { ByteBuffer _key62; // required - ByteBuffer _val63; // required + ByteBuffer _val63; // optional _key62 = iprot.readBinary(); _val63 = iprot.readBinary(); struct.attributes.put(_key62, _val63); @@ -13213,7 +13306,7 @@ public class Hbase { for (int _i67 = 0; _i67 < _map66.size; ++_i67) { ByteBuffer _key68; // required - ByteBuffer _val69; // required + ByteBuffer _val69; // optional _key68 = iprot.readBinary(); _val69 = iprot.readBinary(); struct.attributes.put(_key68, _val69); @@ -13500,7 +13593,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean 
present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(get_result other) { @@ -13615,7 +13720,7 @@ public class Hbase { struct.success = new ArrayList(_list70.size); for (int _i71 = 0; _i71 < _list70.size; ++_i71) { - TCell _elem72; // optional + TCell _elem72; // required _elem72 = new TCell(); _elem72.read(iprot); struct.success.add(_elem72); @@ -13717,7 +13822,7 @@ public class Hbase { struct.success = new ArrayList(_list75.size); for (int _i76 = 0; _i76 < _list75.size; ++_i76) { - TCell _elem77; // optional + TCell _elem77; // required _elem77 = new TCell(); _elem77.read(iprot); struct.success.add(_elem77); @@ -14284,7 +14389,34 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + boolean present_column = true && (isSetColumn()); + builder.append(present_column); + if (present_column) + builder.append(column); + + boolean present_numVersions = true; + builder.append(present_numVersions); + if (present_numVersions) + builder.append(numVersions); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(getVer_args other) { @@ -14484,7 +14616,7 @@ public class Hbase { for (int _i79 = 0; _i79 < _map78.size; ++_i79) { ByteBuffer _key80; // required - ByteBuffer _val81; // required + ByteBuffer _val81; // optional _key80 = iprot.readBinary(); _val81 = iprot.readBinary(); struct.attributes.put(_key80, _val81); @@ -14627,7 +14759,7 @@ public class Hbase { for (int _i85 = 0; _i85 < _map84.size; ++_i85) { ByteBuffer _key86; // required - ByteBuffer _val87; // required + ByteBuffer _val87; // optional _key86 = iprot.readBinary(); _val87 = iprot.readBinary(); struct.attributes.put(_key86, _val87); @@ -14914,7 +15046,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(getVer_result other) { @@ -15029,7 +15173,7 @@ public class Hbase { struct.success = new ArrayList(_list88.size); for (int _i89 = 0; _i89 < _list88.size; ++_i89) { - TCell _elem90; // optional + TCell _elem90; // required _elem90 = new TCell(); _elem90.read(iprot); struct.success.add(_elem90); @@ -15131,7 +15275,7 @@ public class Hbase { struct.success = new ArrayList(_list93.size); for (int _i94 = 0; _i94 < _list93.size; ++_i94) { - TCell _elem95; // optional + TCell _elem95; // required _elem95 = new TCell(); _elem95.read(iprot); struct.success.add(_elem95); @@ -15769,7 +15913,39 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean 
present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + boolean present_column = true && (isSetColumn()); + builder.append(present_column); + if (present_column) + builder.append(column); + + boolean present_timestamp = true; + builder.append(present_timestamp); + if (present_timestamp) + builder.append(timestamp); + + boolean present_numVersions = true; + builder.append(present_numVersions); + if (present_numVersions) + builder.append(numVersions); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(getVerTs_args other) { @@ -15989,7 +16165,7 @@ public class Hbase { for (int _i97 = 0; _i97 < _map96.size; ++_i97) { ByteBuffer _key98; // required - ByteBuffer _val99; // required + ByteBuffer _val99; // optional _key98 = iprot.readBinary(); _val99 = iprot.readBinary(); struct.attributes.put(_key98, _val99); @@ -16145,7 +16321,7 @@ public class Hbase { for (int _i103 = 0; _i103 < _map102.size; ++_i103) { ByteBuffer _key104; // required - ByteBuffer _val105; // required + ByteBuffer _val105; // optional _key104 = iprot.readBinary(); _val105 = iprot.readBinary(); struct.attributes.put(_key104, _val105); @@ -16432,7 +16608,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(getVerTs_result other) { @@ -16547,7 +16735,7 @@ public class Hbase { struct.success = new ArrayList(_list106.size); for (int _i107 = 0; _i107 < _list106.size; ++_i107) { - TCell _elem108; // optional + TCell _elem108; // required _elem108 = new TCell(); _elem108.read(iprot); struct.success.add(_elem108); @@ -16649,7 +16837,7 @@ public class Hbase { struct.success = new ArrayList(_list111.size); for (int _i112 = 0; _i112 < _list111.size; ++_i112) { - TCell _elem113; // optional + TCell _elem113; // required _elem113 = new TCell(); _elem113.read(iprot); struct.success.add(_elem113); @@ -17061,7 +17249,24 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(getRow_args other) { @@ -17211,7 +17416,7 @@ public class Hbase { for (int _i115 = 0; _i115 < _map114.size; ++_i115) { ByteBuffer _key116; // required - ByteBuffer _val117; // required + ByteBuffer _val117; // optional _key116 = iprot.readBinary(); _val117 = iprot.readBinary(); struct.attributes.put(_key116, _val117); @@ -17326,7 +17531,7 @@ public class Hbase { for (int _i121 = 0; _i121 < _map120.size; ++_i121) { ByteBuffer _key122; // required - ByteBuffer _val123; // required + ByteBuffer _val123; // optional _key122 = iprot.readBinary(); 
_val123 = iprot.readBinary(); struct.attributes.put(_key122, _val123); @@ -17613,7 +17818,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(getRow_result other) { @@ -17728,7 +17945,7 @@ public class Hbase { struct.success = new ArrayList(_list124.size); for (int _i125 = 0; _i125 < _list124.size; ++_i125) { - TRowResult _elem126; // optional + TRowResult _elem126; // required _elem126 = new TRowResult(); _elem126.read(iprot); struct.success.add(_elem126); @@ -17830,7 +18047,7 @@ public class Hbase { struct.success = new ArrayList(_list129.size); for (int _i130 = 0; _i130 < _list129.size; ++_i130) { - TRowResult _elem131; // optional + TRowResult _elem131; // required _elem131 = new TRowResult(); _elem131.read(iprot); struct.success.add(_elem131); @@ -18333,7 +18550,29 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + boolean present_columns = true && (isSetColumns()); + builder.append(present_columns); + if (present_columns) + builder.append(columns); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(getRowWithColumns_args other) { @@ -18500,7 +18739,7 @@ public class Hbase { struct.columns = new ArrayList(_list132.size); for (int _i133 = 0; _i133 < _list132.size; ++_i133) { - ByteBuffer _elem134; // optional + ByteBuffer _elem134; // required _elem134 = iprot.readBinary(); struct.columns.add(_elem134); } @@ -18519,7 +18758,7 @@ public class Hbase { for (int _i136 = 0; _i136 < _map135.size; ++_i136) { ByteBuffer _key137; // required - ByteBuffer _val138; // required + ByteBuffer _val138; // optional _key137 = iprot.readBinary(); _val138 = iprot.readBinary(); struct.attributes.put(_key137, _val138); @@ -18657,7 +18896,7 @@ public class Hbase { struct.columns = new ArrayList(_list143.size); for (int _i144 = 0; _i144 < _list143.size; ++_i144) { - ByteBuffer _elem145; // optional + ByteBuffer _elem145; // required _elem145 = iprot.readBinary(); struct.columns.add(_elem145); } @@ -18671,7 +18910,7 @@ public class Hbase { for (int _i147 = 0; _i147 < _map146.size; ++_i147) { ByteBuffer _key148; // required - ByteBuffer _val149; // required + ByteBuffer _val149; // optional _key148 = iprot.readBinary(); _val149 = iprot.readBinary(); struct.attributes.put(_key148, _val149); @@ -18958,7 +19197,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int 
compareTo(getRowWithColumns_result other) { @@ -19073,7 +19324,7 @@ public class Hbase { struct.success = new ArrayList(_list150.size); for (int _i151 = 0; _i151 < _list150.size; ++_i151) { - TRowResult _elem152; // optional + TRowResult _elem152; // required _elem152 = new TRowResult(); _elem152.read(iprot); struct.success.add(_elem152); @@ -19175,7 +19426,7 @@ public class Hbase { struct.success = new ArrayList(_list155.size); for (int _i156 = 0; _i156 < _list155.size; ++_i156) { - TRowResult _elem157; // optional + TRowResult _elem157; // required _elem157 = new TRowResult(); _elem157.read(iprot); struct.success.add(_elem157); @@ -19661,7 +19912,29 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + boolean present_timestamp = true; + builder.append(present_timestamp); + if (present_timestamp) + builder.append(timestamp); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(getRowTs_args other) { @@ -19835,7 +20108,7 @@ public class Hbase { for (int _i159 = 0; _i159 < _map158.size; ++_i159) { ByteBuffer _key160; // required - ByteBuffer _val161; // required + ByteBuffer _val161; // optional _key160 = iprot.readBinary(); _val161 = iprot.readBinary(); struct.attributes.put(_key160, _val161); @@ -19963,7 +20236,7 @@ public class Hbase { for (int _i165 = 0; _i165 < _map164.size; ++_i165) { ByteBuffer _key166; // required - ByteBuffer _val167; // required + ByteBuffer _val167; // optional _key166 = iprot.readBinary(); _val167 = iprot.readBinary(); struct.attributes.put(_key166, _val167); @@ -20250,7 +20523,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(getRowTs_result other) { @@ -20365,7 +20650,7 @@ public class Hbase { struct.success = new ArrayList(_list168.size); for (int _i169 = 0; _i169 < _list168.size; ++_i169) { - TRowResult _elem170; // optional + TRowResult _elem170; // required _elem170 = new TRowResult(); _elem170.read(iprot); struct.success.add(_elem170); @@ -20467,7 +20752,7 @@ public class Hbase { struct.success = new ArrayList(_list173.size); for (int _i174 = 0; _i174 < _list173.size; ++_i174) { - TRowResult _elem175; // optional + TRowResult _elem175; // required _elem175 = new TRowResult(); _elem175.read(iprot); struct.success.add(_elem175); @@ -21032,7 +21317,34 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + boolean present_columns = true && 
(isSetColumns()); + builder.append(present_columns); + if (present_columns) + builder.append(columns); + + boolean present_timestamp = true; + builder.append(present_timestamp); + if (present_timestamp) + builder.append(timestamp); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(getRowWithColumnsTs_args other) { @@ -21215,7 +21527,7 @@ public class Hbase { struct.columns = new ArrayList(_list176.size); for (int _i177 = 0; _i177 < _list176.size; ++_i177) { - ByteBuffer _elem178; // optional + ByteBuffer _elem178; // required _elem178 = iprot.readBinary(); struct.columns.add(_elem178); } @@ -21242,7 +21554,7 @@ public class Hbase { for (int _i180 = 0; _i180 < _map179.size; ++_i180) { ByteBuffer _key181; // required - ByteBuffer _val182; // required + ByteBuffer _val182; // optional _key181 = iprot.readBinary(); _val182 = iprot.readBinary(); struct.attributes.put(_key181, _val182); @@ -21389,7 +21701,7 @@ public class Hbase { struct.columns = new ArrayList(_list187.size); for (int _i188 = 0; _i188 < _list187.size; ++_i188) { - ByteBuffer _elem189; // optional + ByteBuffer _elem189; // required _elem189 = iprot.readBinary(); struct.columns.add(_elem189); } @@ -21407,7 +21719,7 @@ public class Hbase { for (int _i191 = 0; _i191 < _map190.size; ++_i191) { ByteBuffer _key192; // required - ByteBuffer _val193; // required + ByteBuffer _val193; // optional _key192 = iprot.readBinary(); _val193 = iprot.readBinary(); struct.attributes.put(_key192, _val193); @@ -21694,7 +22006,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(getRowWithColumnsTs_result other) { @@ -21809,7 +22133,7 @@ public class Hbase { struct.success = new ArrayList(_list194.size); for (int _i195 = 0; _i195 < _list194.size; ++_i195) { - TRowResult _elem196; // optional + TRowResult _elem196; // required _elem196 = new TRowResult(); _elem196.read(iprot); struct.success.add(_elem196); @@ -21911,7 +22235,7 @@ public class Hbase { struct.success = new ArrayList(_list199.size); for (int _i200 = 0; _i200 < _list199.size; ++_i200) { - TRowResult _elem201; // optional + TRowResult _elem201; // required _elem201 = new TRowResult(); _elem201.read(iprot); struct.success.add(_elem201); @@ -22333,7 +22657,24 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_rows = true && (isSetRows()); + builder.append(present_rows); + if (present_rows) + builder.append(rows); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(getRows_args other) { @@ -22474,7 +22815,7 @@ public class Hbase { struct.rows = new ArrayList(_list202.size); for (int _i203 = 0; _i203 < _list202.size; ++_i203) { - ByteBuffer _elem204; // optional + 
ByteBuffer _elem204; // required _elem204 = iprot.readBinary(); struct.rows.add(_elem204); } @@ -22493,7 +22834,7 @@ public class Hbase { for (int _i206 = 0; _i206 < _map205.size; ++_i206) { ByteBuffer _key207; // required - ByteBuffer _val208; // required + ByteBuffer _val208; // optional _key207 = iprot.readBinary(); _val208 = iprot.readBinary(); struct.attributes.put(_key207, _val208); @@ -22616,7 +22957,7 @@ public class Hbase { struct.rows = new ArrayList(_list213.size); for (int _i214 = 0; _i214 < _list213.size; ++_i214) { - ByteBuffer _elem215; // optional + ByteBuffer _elem215; // required _elem215 = iprot.readBinary(); struct.rows.add(_elem215); } @@ -22630,7 +22971,7 @@ public class Hbase { for (int _i217 = 0; _i217 < _map216.size; ++_i217) { ByteBuffer _key218; // required - ByteBuffer _val219; // required + ByteBuffer _val219; // optional _key218 = iprot.readBinary(); _val219 = iprot.readBinary(); struct.attributes.put(_key218, _val219); @@ -22917,7 +23258,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(getRows_result other) { @@ -23032,7 +23385,7 @@ public class Hbase { struct.success = new ArrayList(_list220.size); for (int _i221 = 0; _i221 < _list220.size; ++_i221) { - TRowResult _elem222; // optional + TRowResult _elem222; // required _elem222 = new TRowResult(); _elem222.read(iprot); struct.success.add(_elem222); @@ -23134,7 +23487,7 @@ public class Hbase { struct.success = new ArrayList(_list225.size); for (int _i226 = 0; _i226 < _list225.size; ++_i226) { - TRowResult _elem227; // optional + TRowResult _elem227; // required _elem227 = new TRowResult(); _elem227.read(iprot); struct.success.add(_elem227); @@ -23647,7 +24000,29 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_rows = true && (isSetRows()); + builder.append(present_rows); + if (present_rows) + builder.append(rows); + + boolean present_columns = true && (isSetColumns()); + builder.append(present_columns); + if (present_columns) + builder.append(columns); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(getRowsWithColumns_args other) { @@ -23806,7 +24181,7 @@ public class Hbase { struct.rows = new ArrayList(_list228.size); for (int _i229 = 0; _i229 < _list228.size; ++_i229) { - ByteBuffer _elem230; // optional + ByteBuffer _elem230; // required _elem230 = iprot.readBinary(); struct.rows.add(_elem230); } @@ -23824,7 +24199,7 @@ public class Hbase { struct.columns = new ArrayList(_list231.size); for (int _i232 = 0; _i232 < _list231.size; ++_i232) { - ByteBuffer _elem233; // optional + ByteBuffer _elem233; // required _elem233 = iprot.readBinary(); struct.columns.add(_elem233); } @@ -23843,7 +24218,7 @@ public class Hbase { for (int _i235 = 0; _i235 < _map234.size; ++_i235) { ByteBuffer _key236; // required - ByteBuffer _val237; // 
required + ByteBuffer _val237; // optional _key236 = iprot.readBinary(); _val237 = iprot.readBinary(); struct.attributes.put(_key236, _val237); @@ -23990,7 +24365,7 @@ public class Hbase { struct.rows = new ArrayList(_list244.size); for (int _i245 = 0; _i245 < _list244.size; ++_i245) { - ByteBuffer _elem246; // optional + ByteBuffer _elem246; // required _elem246 = iprot.readBinary(); struct.rows.add(_elem246); } @@ -24003,7 +24378,7 @@ public class Hbase { struct.columns = new ArrayList(_list247.size); for (int _i248 = 0; _i248 < _list247.size; ++_i248) { - ByteBuffer _elem249; // optional + ByteBuffer _elem249; // required _elem249 = iprot.readBinary(); struct.columns.add(_elem249); } @@ -24017,7 +24392,7 @@ public class Hbase { for (int _i251 = 0; _i251 < _map250.size; ++_i251) { ByteBuffer _key252; // required - ByteBuffer _val253; // required + ByteBuffer _val253; // optional _key252 = iprot.readBinary(); _val253 = iprot.readBinary(); struct.attributes.put(_key252, _val253); @@ -24304,7 +24679,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(getRowsWithColumns_result other) { @@ -24419,7 +24806,7 @@ public class Hbase { struct.success = new ArrayList(_list254.size); for (int _i255 = 0; _i255 < _list254.size; ++_i255) { - TRowResult _elem256; // optional + TRowResult _elem256; // required _elem256 = new TRowResult(); _elem256.read(iprot); struct.success.add(_elem256); @@ -24521,7 +24908,7 @@ public class Hbase { struct.success = new ArrayList(_list259.size); for (int _i260 = 0; _i260 < _list259.size; ++_i260) { - TRowResult _elem261; // optional + TRowResult _elem261; // required _elem261 = new TRowResult(); _elem261.read(iprot); struct.success.add(_elem261); @@ -25017,7 +25404,29 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_rows = true && (isSetRows()); + builder.append(present_rows); + if (present_rows) + builder.append(rows); + + boolean present_timestamp = true; + builder.append(present_timestamp); + if (present_timestamp) + builder.append(timestamp); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(getRowsTs_args other) { @@ -25174,7 +25583,7 @@ public class Hbase { struct.rows = new ArrayList(_list262.size); for (int _i263 = 0; _i263 < _list262.size; ++_i263) { - ByteBuffer _elem264; // optional + ByteBuffer _elem264; // required _elem264 = iprot.readBinary(); struct.rows.add(_elem264); } @@ -25201,7 +25610,7 @@ public class Hbase { for (int _i266 = 0; _i266 < _map265.size; ++_i266) { ByteBuffer _key267; // required - ByteBuffer _val268; // required + ByteBuffer _val268; // optional _key267 = iprot.readBinary(); _val268 = iprot.readBinary(); struct.attributes.put(_key267, _val268); @@ -25333,7 +25742,7 @@ public class Hbase { struct.rows = new ArrayList(_list273.size); for (int _i274 = 0; _i274 < 
_list273.size; ++_i274) { - ByteBuffer _elem275; // optional + ByteBuffer _elem275; // required _elem275 = iprot.readBinary(); struct.rows.add(_elem275); } @@ -25351,7 +25760,7 @@ public class Hbase { for (int _i277 = 0; _i277 < _map276.size; ++_i277) { ByteBuffer _key278; // required - ByteBuffer _val279; // required + ByteBuffer _val279; // optional _key278 = iprot.readBinary(); _val279 = iprot.readBinary(); struct.attributes.put(_key278, _val279); @@ -25638,7 +26047,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(getRowsTs_result other) { @@ -25753,7 +26174,7 @@ public class Hbase { struct.success = new ArrayList(_list280.size); for (int _i281 = 0; _i281 < _list280.size; ++_i281) { - TRowResult _elem282; // optional + TRowResult _elem282; // required _elem282 = new TRowResult(); _elem282.read(iprot); struct.success.add(_elem282); @@ -25855,7 +26276,7 @@ public class Hbase { struct.success = new ArrayList(_list285.size); for (int _i286 = 0; _i286 < _list285.size; ++_i286) { - TRowResult _elem287; // optional + TRowResult _elem287; // required _elem287 = new TRowResult(); _elem287.read(iprot); struct.success.add(_elem287); @@ -26430,7 +26851,34 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_rows = true && (isSetRows()); + builder.append(present_rows); + if (present_rows) + builder.append(rows); + + boolean present_columns = true && (isSetColumns()); + builder.append(present_columns); + if (present_columns) + builder.append(columns); + + boolean present_timestamp = true; + builder.append(present_timestamp); + if (present_timestamp) + builder.append(timestamp); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(getRowsWithColumnsTs_args other) { @@ -26605,7 +27053,7 @@ public class Hbase { struct.rows = new ArrayList(_list288.size); for (int _i289 = 0; _i289 < _list288.size; ++_i289) { - ByteBuffer _elem290; // optional + ByteBuffer _elem290; // required _elem290 = iprot.readBinary(); struct.rows.add(_elem290); } @@ -26623,7 +27071,7 @@ public class Hbase { struct.columns = new ArrayList(_list291.size); for (int _i292 = 0; _i292 < _list291.size; ++_i292) { - ByteBuffer _elem293; // optional + ByteBuffer _elem293; // required _elem293 = iprot.readBinary(); struct.columns.add(_elem293); } @@ -26650,7 +27098,7 @@ public class Hbase { for (int _i295 = 0; _i295 < _map294.size; ++_i295) { ByteBuffer _key296; // required - ByteBuffer _val297; // required + ByteBuffer _val297; // optional _key296 = iprot.readBinary(); _val297 = iprot.readBinary(); struct.attributes.put(_key296, _val297); @@ -26806,7 +27254,7 @@ public class Hbase { struct.rows = new ArrayList(_list304.size); for (int _i305 = 0; _i305 < _list304.size; ++_i305) { - ByteBuffer _elem306; // optional + ByteBuffer _elem306; // required _elem306 = 
iprot.readBinary(); struct.rows.add(_elem306); } @@ -26819,7 +27267,7 @@ public class Hbase { struct.columns = new ArrayList(_list307.size); for (int _i308 = 0; _i308 < _list307.size; ++_i308) { - ByteBuffer _elem309; // optional + ByteBuffer _elem309; // required _elem309 = iprot.readBinary(); struct.columns.add(_elem309); } @@ -26837,7 +27285,7 @@ public class Hbase { for (int _i311 = 0; _i311 < _map310.size; ++_i311) { ByteBuffer _key312; // required - ByteBuffer _val313; // required + ByteBuffer _val313; // optional _key312 = iprot.readBinary(); _val313 = iprot.readBinary(); struct.attributes.put(_key312, _val313); @@ -27124,7 +27572,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(getRowsWithColumnsTs_result other) { @@ -27239,7 +27699,7 @@ public class Hbase { struct.success = new ArrayList(_list314.size); for (int _i315 = 0; _i315 < _list314.size; ++_i315) { - TRowResult _elem316; // optional + TRowResult _elem316; // required _elem316 = new TRowResult(); _elem316.read(iprot); struct.success.add(_elem316); @@ -27341,7 +27801,7 @@ public class Hbase { struct.success = new ArrayList(_list319.size); for (int _i320 = 0; _i320 < _list319.size; ++_i320) { - TRowResult _elem321; // optional + TRowResult _elem321; // required _elem321 = new TRowResult(); _elem321.read(iprot); struct.success.add(_elem321); @@ -27386,7 +27846,7 @@ public class Hbase { */ public List mutations; // required /** - * Put attributes + * Mutation attributes */ public Map attributes; // required @@ -27405,7 +27865,7 @@ public class Hbase { */ MUTATIONS((short)3, "mutations"), /** - * Put attributes + * Mutation attributes */ ATTRIBUTES((short)4, "attributes"); @@ -27687,14 +28147,14 @@ public class Hbase { } /** - * Put attributes + * Mutation attributes */ public Map getAttributes() { return this.attributes; } /** - * Put attributes + * Mutation attributes */ public mutateRow_args setAttributes(Map attributes) { this.attributes = attributes; @@ -27844,7 +28304,29 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + boolean present_mutations = true && (isSetMutations()); + builder.append(present_mutations); + if (present_mutations) + builder.append(mutations); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(mutateRow_args other) { @@ -28011,7 +28493,7 @@ public class Hbase { struct.mutations = new ArrayList(_list322.size); for (int _i323 = 0; _i323 < _list322.size; ++_i323) { - Mutation _elem324; // optional + Mutation _elem324; // required _elem324 = new Mutation(); _elem324.read(iprot); struct.mutations.add(_elem324); @@ -28031,7 +28513,7 @@ public class Hbase { for (int _i326 = 0; _i326 < _map325.size; ++_i326) { ByteBuffer _key327; 
// required - ByteBuffer _val328; // required + ByteBuffer _val328; // optional _key327 = iprot.readBinary(); _val328 = iprot.readBinary(); struct.attributes.put(_key327, _val328); @@ -28169,7 +28651,7 @@ public class Hbase { struct.mutations = new ArrayList(_list333.size); for (int _i334 = 0; _i334 < _list333.size; ++_i334) { - Mutation _elem335; // optional + Mutation _elem335; // required _elem335 = new Mutation(); _elem335.read(iprot); struct.mutations.add(_elem335); @@ -28184,7 +28666,7 @@ public class Hbase { for (int _i337 = 0; _i337 < _map336.size; ++_i337) { ByteBuffer _key338; // required - ByteBuffer _val339; // required + ByteBuffer _val339; // optional _key338 = iprot.readBinary(); _val339 = iprot.readBinary(); struct.attributes.put(_key338, _val339); @@ -28451,7 +28933,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + boolean present_ia = true && (isSetIa()); + builder.append(present_ia); + if (present_ia) + builder.append(ia); + + return builder.toHashCode(); } public int compareTo(mutateRow_result other) { @@ -28686,7 +29180,7 @@ public class Hbase { */ public long timestamp; // required /** - * Put attributes + * Mutation attributes */ public Map attributes; // required @@ -28709,7 +29203,7 @@ public class Hbase { */ TIMESTAMP((short)4, "timestamp"), /** - * Put attributes + * Mutation attributes */ ATTRIBUTES((short)5, "attributes"); @@ -29034,14 +29528,14 @@ public class Hbase { } /** - * Put attributes + * Mutation attributes */ public Map getAttributes() { return this.attributes; } /** - * Put attributes + * Mutation attributes */ public mutateRowTs_args setAttributes(Map attributes) { this.attributes = attributes; @@ -29213,7 +29707,34 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + boolean present_mutations = true && (isSetMutations()); + builder.append(present_mutations); + if (present_mutations) + builder.append(mutations); + + boolean present_timestamp = true; + builder.append(present_timestamp); + if (present_timestamp) + builder.append(timestamp); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(mutateRowTs_args other) { @@ -29396,7 +29917,7 @@ public class Hbase { struct.mutations = new ArrayList(_list340.size); for (int _i341 = 0; _i341 < _list340.size; ++_i341) { - Mutation _elem342; // optional + Mutation _elem342; // required _elem342 = new Mutation(); _elem342.read(iprot); struct.mutations.add(_elem342); @@ -29424,7 +29945,7 @@ public class Hbase { for (int _i344 = 0; _i344 < _map343.size; ++_i344) { ByteBuffer _key345; // required - ByteBuffer _val346; // required + ByteBuffer _val346; // optional _key345 = iprot.readBinary(); _val346 = iprot.readBinary(); struct.attributes.put(_key345, _val346); @@ -29571,7 +30092,7 @@ public class Hbase { struct.mutations = new ArrayList(_list351.size); for (int _i352 = 0; _i352 < _list351.size; ++_i352) { - Mutation _elem353; 
// optional + Mutation _elem353; // required _elem353 = new Mutation(); _elem353.read(iprot); struct.mutations.add(_elem353); @@ -29590,7 +30111,7 @@ public class Hbase { for (int _i355 = 0; _i355 < _map354.size; ++_i355) { ByteBuffer _key356; // required - ByteBuffer _val357; // required + ByteBuffer _val357; // optional _key356 = iprot.readBinary(); _val357 = iprot.readBinary(); struct.attributes.put(_key356, _val357); @@ -29857,7 +30378,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + boolean present_ia = true && (isSetIa()); + builder.append(present_ia); + if (present_ia) + builder.append(ia); + + return builder.toHashCode(); } public int compareTo(mutateRowTs_result other) { @@ -30082,7 +30615,7 @@ public class Hbase { */ public List rowBatches; // required /** - * Put attributes + * Mutation attributes */ public Map attributes; // required @@ -30097,7 +30630,7 @@ public class Hbase { */ ROW_BATCHES((short)2, "rowBatches"), /** - * Put attributes + * Mutation attributes */ ATTRIBUTES((short)3, "attributes"); @@ -30329,14 +30862,14 @@ public class Hbase { } /** - * Put attributes + * Mutation attributes */ public Map getAttributes() { return this.attributes; } /** - * Put attributes + * Mutation attributes */ public mutateRows_args setAttributes(Map attributes) { this.attributes = attributes; @@ -30464,7 +30997,24 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_rowBatches = true && (isSetRowBatches()); + builder.append(present_rowBatches); + if (present_rowBatches) + builder.append(rowBatches); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(mutateRows_args other) { @@ -30605,7 +31155,7 @@ public class Hbase { struct.rowBatches = new ArrayList(_list358.size); for (int _i359 = 0; _i359 < _list358.size; ++_i359) { - BatchMutation _elem360; // optional + BatchMutation _elem360; // required _elem360 = new BatchMutation(); _elem360.read(iprot); struct.rowBatches.add(_elem360); @@ -30625,7 +31175,7 @@ public class Hbase { for (int _i362 = 0; _i362 < _map361.size; ++_i362) { ByteBuffer _key363; // required - ByteBuffer _val364; // required + ByteBuffer _val364; // optional _key363 = iprot.readBinary(); _val364 = iprot.readBinary(); struct.attributes.put(_key363, _val364); @@ -30748,7 +31298,7 @@ public class Hbase { struct.rowBatches = new ArrayList(_list369.size); for (int _i370 = 0; _i370 < _list369.size; ++_i370) { - BatchMutation _elem371; // optional + BatchMutation _elem371; // required _elem371 = new BatchMutation(); _elem371.read(iprot); struct.rowBatches.add(_elem371); @@ -30763,7 +31313,7 @@ public class Hbase { for (int _i373 = 0; _i373 < _map372.size; ++_i373) { ByteBuffer _key374; // required - ByteBuffer _val375; // required + ByteBuffer _val375; // optional _key374 = iprot.readBinary(); _val375 = iprot.readBinary(); struct.attributes.put(_key374, _val375); @@ -31030,7 +31580,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new 
HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + boolean present_ia = true && (isSetIa()); + builder.append(present_ia); + if (present_ia) + builder.append(ia); + + return builder.toHashCode(); } public int compareTo(mutateRows_result other) { @@ -31260,7 +31822,7 @@ public class Hbase { */ public long timestamp; // required /** - * Put attributes + * Mutation attributes */ public Map attributes; // required @@ -31279,7 +31841,7 @@ public class Hbase { */ TIMESTAMP((short)3, "timestamp"), /** - * Put attributes + * Mutation attributes */ ATTRIBUTES((short)4, "attributes"); @@ -31554,14 +32116,14 @@ public class Hbase { } /** - * Put attributes + * Mutation attributes */ public Map getAttributes() { return this.attributes; } /** - * Put attributes + * Mutation attributes */ public mutateRowsTs_args setAttributes(Map attributes) { this.attributes = attributes; @@ -31711,7 +32273,29 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_rowBatches = true && (isSetRowBatches()); + builder.append(present_rowBatches); + if (present_rowBatches) + builder.append(rowBatches); + + boolean present_timestamp = true; + builder.append(present_timestamp); + if (present_timestamp) + builder.append(timestamp); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(mutateRowsTs_args other) { @@ -31866,7 +32450,7 @@ public class Hbase { struct.rowBatches = new ArrayList(_list376.size); for (int _i377 = 0; _i377 < _list376.size; ++_i377) { - BatchMutation _elem378; // optional + BatchMutation _elem378; // required _elem378 = new BatchMutation(); _elem378.read(iprot); struct.rowBatches.add(_elem378); @@ -31894,7 +32478,7 @@ public class Hbase { for (int _i380 = 0; _i380 < _map379.size; ++_i380) { ByteBuffer _key381; // required - ByteBuffer _val382; // required + ByteBuffer _val382; // optional _key381 = iprot.readBinary(); _val382 = iprot.readBinary(); struct.attributes.put(_key381, _val382); @@ -32026,7 +32610,7 @@ public class Hbase { struct.rowBatches = new ArrayList(_list387.size); for (int _i388 = 0; _i388 < _list387.size; ++_i388) { - BatchMutation _elem389; // optional + BatchMutation _elem389; // required _elem389 = new BatchMutation(); _elem389.read(iprot); struct.rowBatches.add(_elem389); @@ -32045,7 +32629,7 @@ public class Hbase { for (int _i391 = 0; _i391 < _map390.size; ++_i391) { ByteBuffer _key392; // required - ByteBuffer _val393; // required + ByteBuffer _val393; // optional _key392 = iprot.readBinary(); _val393 = iprot.readBinary(); struct.attributes.put(_key392, _val393); @@ -32312,7 +32896,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + boolean present_ia = true && (isSetIa()); + builder.append(present_ia); + if (present_ia) + builder.append(ia); + + return builder.toHashCode(); } public int compareTo(mutateRowsTs_result other) { @@ -32968,7 +33564,29 @@ public class Hbase { @Override public int hashCode() { - return 0; + 
HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + boolean present_column = true && (isSetColumn()); + builder.append(present_column); + if (present_column) + builder.append(column); + + boolean present_value = true; + builder.append(present_value); + if (present_value) + builder.append(value); + + return builder.toHashCode(); } public int compareTo(atomicIncrement_args other) { @@ -33561,7 +34179,24 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true; + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + boolean present_ia = true && (isSetIa()); + builder.append(present_ia); + if (present_ia) + builder.append(ia); + + return builder.toHashCode(); } public int compareTo(atomicIncrement_result other) { @@ -33659,6 +34294,8 @@ public class Hbase { private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bit_vector = new BitSet(1); read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -34274,7 +34911,29 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + boolean present_column = true && (isSetColumn()); + builder.append(present_column); + if (present_column) + builder.append(column); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(deleteAll_args other) { @@ -34450,7 +35109,7 @@ public class Hbase { for (int _i395 = 0; _i395 < _map394.size; ++_i395) { ByteBuffer _key396; // required - ByteBuffer _val397; // required + ByteBuffer _val397; // optional _key396 = iprot.readBinary(); _val397 = iprot.readBinary(); struct.attributes.put(_key396, _val397); @@ -34580,7 +35239,7 @@ public class Hbase { for (int _i401 = 0; _i401 < _map400.size; ++_i401) { ByteBuffer _key402; // required - ByteBuffer _val403; // required + ByteBuffer _val403; // optional _key402 = iprot.readBinary(); _val403 = iprot.readBinary(); struct.attributes.put(_key402, _val403); @@ -34788,7 +35447,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(deleteAll_result other) { @@ -35497,7 +36163,34 @@ public class Hbase { @Override public int 
hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + boolean present_column = true && (isSetColumn()); + builder.append(present_column); + if (present_column) + builder.append(column); + + boolean present_timestamp = true; + builder.append(present_timestamp); + if (present_timestamp) + builder.append(timestamp); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(deleteAllTs_args other) { @@ -35697,7 +36390,7 @@ public class Hbase { for (int _i405 = 0; _i405 < _map404.size; ++_i405) { ByteBuffer _key406; // required - ByteBuffer _val407; // required + ByteBuffer _val407; // optional _key406 = iprot.readBinary(); _val407 = iprot.readBinary(); struct.attributes.put(_key406, _val407); @@ -35840,7 +36533,7 @@ public class Hbase { for (int _i411 = 0; _i411 < _map410.size; ++_i411) { ByteBuffer _key412; // required - ByteBuffer _val413; // required + ByteBuffer _val413; // optional _key412 = iprot.readBinary(); _val413 = iprot.readBinary(); struct.attributes.put(_key412, _val413); @@ -36048,7 +36741,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(deleteAllTs_result other) { @@ -36602,7 +37302,24 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(deleteAllRow_args other) { @@ -36752,7 +37469,7 @@ public class Hbase { for (int _i415 = 0; _i415 < _map414.size; ++_i415) { ByteBuffer _key416; // required - ByteBuffer _val417; // required + ByteBuffer _val417; // optional _key416 = iprot.readBinary(); _val417 = iprot.readBinary(); struct.attributes.put(_key416, _val417); @@ -36867,7 +37584,7 @@ public class Hbase { for (int _i421 = 0; _i421 < _map420.size; ++_i421) { ByteBuffer _key422; // required - ByteBuffer _val423; // required + ByteBuffer _val423; // optional _key422 = iprot.readBinary(); _val423 = iprot.readBinary(); struct.attributes.put(_key422, _val423); @@ -37075,7 +37792,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(deleteAllRow_result other) { @@ -37703,7 +38427,29 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean 
present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + boolean present_timestamp = true; + builder.append(present_timestamp); + if (present_timestamp) + builder.append(timestamp); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(deleteAllRowTs_args other) { @@ -37877,7 +38623,7 @@ public class Hbase { for (int _i425 = 0; _i425 < _map424.size; ++_i425) { ByteBuffer _key426; // required - ByteBuffer _val427; // required + ByteBuffer _val427; // optional _key426 = iprot.readBinary(); _val427 = iprot.readBinary(); struct.attributes.put(_key426, _val427); @@ -38005,7 +38751,7 @@ public class Hbase { for (int _i431 = 0; _i431 < _map430.size; ++_i431) { ByteBuffer _key432; // required - ByteBuffer _val433; // required + ByteBuffer _val433; // optional _key432 = iprot.readBinary(); _val433 = iprot.readBinary(); struct.attributes.put(_key432, _val433); @@ -38213,7 +38959,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(deleteAllRowTs_result other) { @@ -38757,7 +39510,24 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_scan = true && (isSetScan()); + builder.append(present_scan); + if (present_scan) + builder.append(scan); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(scannerOpenWithScan_args other) { @@ -38908,7 +39678,7 @@ public class Hbase { for (int _i435 = 0; _i435 < _map434.size; ++_i435) { ByteBuffer _key436; // required - ByteBuffer _val437; // required + ByteBuffer _val437; // optional _key436 = iprot.readBinary(); _val437 = iprot.readBinary(); struct.attributes.put(_key436, _val437); @@ -39024,7 +39794,7 @@ public class Hbase { for (int _i441 = 0; _i441 < _map440.size; ++_i441) { ByteBuffer _key442; // required - ByteBuffer _val443; // required + ByteBuffer _val443; // optional _key442 = iprot.readBinary(); _val443 = iprot.readBinary(); struct.attributes.put(_key442, _val443); @@ -39294,7 +40064,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true; + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(scannerOpenWithScan_result other) { @@ -39374,6 +40156,8 @@ public class Hbase { private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, 
and doesn't call the default constructor. + __isset_bit_vector = new BitSet(1); read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -39986,7 +40770,29 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_startRow = true && (isSetStartRow()); + builder.append(present_startRow); + if (present_startRow) + builder.append(startRow); + + boolean present_columns = true && (isSetColumns()); + builder.append(present_columns); + if (present_columns) + builder.append(columns); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(scannerOpen_args other) { @@ -40153,7 +40959,7 @@ public class Hbase { struct.columns = new ArrayList(_list444.size); for (int _i445 = 0; _i445 < _list444.size; ++_i445) { - ByteBuffer _elem446; // optional + ByteBuffer _elem446; // required _elem446 = iprot.readBinary(); struct.columns.add(_elem446); } @@ -40172,7 +40978,7 @@ public class Hbase { for (int _i448 = 0; _i448 < _map447.size; ++_i448) { ByteBuffer _key449; // required - ByteBuffer _val450; // required + ByteBuffer _val450; // optional _key449 = iprot.readBinary(); _val450 = iprot.readBinary(); struct.attributes.put(_key449, _val450); @@ -40310,7 +41116,7 @@ public class Hbase { struct.columns = new ArrayList(_list455.size); for (int _i456 = 0; _i456 < _list455.size; ++_i456) { - ByteBuffer _elem457; // optional + ByteBuffer _elem457; // required _elem457 = iprot.readBinary(); struct.columns.add(_elem457); } @@ -40324,7 +41130,7 @@ public class Hbase { for (int _i459 = 0; _i459 < _map458.size; ++_i459) { ByteBuffer _key460; // required - ByteBuffer _val461; // required + ByteBuffer _val461; // optional _key460 = iprot.readBinary(); _val461 = iprot.readBinary(); struct.attributes.put(_key460, _val461); @@ -40594,7 +41400,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true; + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(scannerOpen_result other) { @@ -40674,6 +41492,8 @@ public class Hbase { private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bit_vector = new BitSet(1); read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -41371,7 +42191,34 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_startRow = true && (isSetStartRow()); + builder.append(present_startRow); + if (present_startRow) + builder.append(startRow); + + boolean present_stopRow = true && (isSetStopRow()); + builder.append(present_stopRow); + if (present_stopRow) + builder.append(stopRow); + + boolean present_columns = true && (isSetColumns()); + builder.append(present_columns); + if (present_columns) + builder.append(columns); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(scannerOpenWithStop_args other) { @@ -41564,7 +42411,7 @@ public class Hbase { struct.columns = new ArrayList(_list462.size); for (int _i463 = 0; _i463 < _list462.size; ++_i463) { - ByteBuffer _elem464; // optional + ByteBuffer _elem464; // required _elem464 = iprot.readBinary(); struct.columns.add(_elem464); } @@ -41583,7 +42430,7 @@ public class Hbase { for (int _i466 = 0; _i466 < _map465.size; ++_i466) { ByteBuffer _key467; // required - ByteBuffer _val468; // required + ByteBuffer _val468; // optional _key467 = iprot.readBinary(); _val468 = iprot.readBinary(); struct.attributes.put(_key467, _val468); @@ -41736,7 +42583,7 @@ public class Hbase { struct.columns = new ArrayList(_list473.size); for (int _i474 = 0; _i474 < _list473.size; ++_i474) { - ByteBuffer _elem475; // optional + ByteBuffer _elem475; // required _elem475 = iprot.readBinary(); struct.columns.add(_elem475); } @@ -41750,7 +42597,7 @@ public class Hbase { for (int _i477 = 0; _i477 < _map476.size; ++_i477) { ByteBuffer _key478; // required - ByteBuffer _val479; // required + ByteBuffer _val479; // optional _key478 = iprot.readBinary(); _val479 = iprot.readBinary(); struct.attributes.put(_key478, _val479); @@ -42020,7 +42867,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true; + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(scannerOpenWithStop_result other) { @@ -42100,6 +42959,8 @@ public class Hbase { private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bit_vector = new BitSet(1); read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -42700,7 +43561,29 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_startAndPrefix = true && (isSetStartAndPrefix()); + builder.append(present_startAndPrefix); + if (present_startAndPrefix) + builder.append(startAndPrefix); + + boolean present_columns = true && (isSetColumns()); + builder.append(present_columns); + if (present_columns) + builder.append(columns); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(scannerOpenWithPrefix_args other) { @@ -42867,7 +43750,7 @@ public class Hbase { struct.columns = new ArrayList(_list480.size); for (int _i481 = 0; _i481 < _list480.size; ++_i481) { - ByteBuffer _elem482; // optional + ByteBuffer _elem482; // required _elem482 = iprot.readBinary(); struct.columns.add(_elem482); } @@ -42886,7 +43769,7 @@ public class Hbase { for (int _i484 = 0; _i484 < _map483.size; ++_i484) { ByteBuffer _key485; // required - ByteBuffer _val486; // required + ByteBuffer _val486; // optional _key485 = iprot.readBinary(); _val486 = iprot.readBinary(); struct.attributes.put(_key485, _val486); @@ -43024,7 +43907,7 @@ public class Hbase { struct.columns = new ArrayList(_list491.size); for (int _i492 = 0; _i492 < _list491.size; ++_i492) { - ByteBuffer _elem493; // optional + ByteBuffer _elem493; // required _elem493 = iprot.readBinary(); struct.columns.add(_elem493); } @@ -43038,7 +43921,7 @@ public class Hbase { for (int _i495 = 0; _i495 < _map494.size; ++_i495) { ByteBuffer _key496; // required - ByteBuffer _val497; // required + ByteBuffer _val497; // optional _key496 = iprot.readBinary(); _val497 = iprot.readBinary(); struct.attributes.put(_key496, _val497); @@ -43308,7 +44191,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true; + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(scannerOpenWithPrefix_result other) { @@ -43388,6 +44283,8 @@ public class Hbase { private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bit_vector = new BitSet(1); read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -44074,7 +44971,34 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_startRow = true && (isSetStartRow()); + builder.append(present_startRow); + if (present_startRow) + builder.append(startRow); + + boolean present_columns = true && (isSetColumns()); + builder.append(present_columns); + if (present_columns) + builder.append(columns); + + boolean present_timestamp = true; + builder.append(present_timestamp); + if (present_timestamp) + builder.append(timestamp); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(scannerOpenTs_args other) { @@ -44255,7 +45179,7 @@ public class Hbase { struct.columns = new ArrayList(_list498.size); for (int _i499 = 0; _i499 < _list498.size; ++_i499) { - ByteBuffer _elem500; // optional + ByteBuffer _elem500; // required _elem500 = iprot.readBinary(); struct.columns.add(_elem500); } @@ -44282,7 +45206,7 @@ public class Hbase { for (int _i502 = 0; _i502 < _map501.size; ++_i502) { ByteBuffer _key503; // required - ByteBuffer _val504; // required + ByteBuffer _val504; // optional _key503 = iprot.readBinary(); _val504 = iprot.readBinary(); struct.attributes.put(_key503, _val504); @@ -44429,7 +45353,7 @@ public class Hbase { struct.columns = new ArrayList(_list509.size); for (int _i510 = 0; _i510 < _list509.size; ++_i510) { - ByteBuffer _elem511; // optional + ByteBuffer _elem511; // required _elem511 = iprot.readBinary(); struct.columns.add(_elem511); } @@ -44447,7 +45371,7 @@ public class Hbase { for (int _i513 = 0; _i513 < _map512.size; ++_i513) { ByteBuffer _key514; // required - ByteBuffer _val515; // required + ByteBuffer _val515; // optional _key514 = iprot.readBinary(); _val515 = iprot.readBinary(); struct.attributes.put(_key514, _val515); @@ -44717,7 +45641,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true; + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(scannerOpenTs_result other) { @@ -44797,6 +45733,8 @@ public class Hbase { private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bit_vector = new BitSet(1); read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -45568,7 +46506,39 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_tableName = true && (isSetTableName()); + builder.append(present_tableName); + if (present_tableName) + builder.append(tableName); + + boolean present_startRow = true && (isSetStartRow()); + builder.append(present_startRow); + if (present_startRow) + builder.append(startRow); + + boolean present_stopRow = true && (isSetStopRow()); + builder.append(present_stopRow); + if (present_stopRow) + builder.append(stopRow); + + boolean present_columns = true && (isSetColumns()); + builder.append(present_columns); + if (present_columns) + builder.append(columns); + + boolean present_timestamp = true; + builder.append(present_timestamp); + if (present_timestamp) + builder.append(timestamp); + + boolean present_attributes = true && (isSetAttributes()); + builder.append(present_attributes); + if (present_attributes) + builder.append(attributes); + + return builder.toHashCode(); } public int compareTo(scannerOpenWithStopTs_args other) { @@ -45775,7 +46745,7 @@ public class Hbase { struct.columns = new ArrayList(_list516.size); for (int _i517 = 0; _i517 < _list516.size; ++_i517) { - ByteBuffer _elem518; // optional + ByteBuffer _elem518; // required _elem518 = iprot.readBinary(); struct.columns.add(_elem518); } @@ -45802,7 +46772,7 @@ public class Hbase { for (int _i520 = 0; _i520 < _map519.size; ++_i520) { ByteBuffer _key521; // required - ByteBuffer _val522; // required + ByteBuffer _val522; // optional _key521 = iprot.readBinary(); _val522 = iprot.readBinary(); struct.attributes.put(_key521, _val522); @@ -45964,7 +46934,7 @@ public class Hbase { struct.columns = new ArrayList(_list527.size); for (int _i528 = 0; _i528 < _list527.size; ++_i528) { - ByteBuffer _elem529; // optional + ByteBuffer _elem529; // required _elem529 = iprot.readBinary(); struct.columns.add(_elem529); } @@ -45982,7 +46952,7 @@ public class Hbase { for (int _i531 = 0; _i531 < _map530.size; ++_i531) { ByteBuffer _key532; // required - ByteBuffer _val533; // required + ByteBuffer _val533; // optional _key532 = iprot.readBinary(); _val533 = iprot.readBinary(); struct.attributes.put(_key532, _val533); @@ -46252,7 +47222,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true; + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(scannerOpenWithStopTs_result other) { @@ -46332,6 +47314,8 @@ public class Hbase { private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bit_vector = new BitSet(1); read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -46657,7 +47641,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_id = true; + builder.append(present_id); + if (present_id) + builder.append(id); + + return builder.toHashCode(); } public int compareTo(scannerGet_args other) { @@ -47144,7 +48135,24 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + boolean present_ia = true && (isSetIa()); + builder.append(present_ia); + if (present_ia) + builder.append(ia); + + return builder.toHashCode(); } public int compareTo(scannerGet_result other) { @@ -47277,7 +48285,7 @@ public class Hbase { struct.success = new ArrayList(_list534.size); for (int _i535 = 0; _i535 < _list534.size; ++_i535) { - TRowResult _elem536; // optional + TRowResult _elem536; // required _elem536 = new TRowResult(); _elem536.read(iprot); struct.success.add(_elem536); @@ -47399,7 +48407,7 @@ public class Hbase { struct.success = new ArrayList(_list539.size); for (int _i540 = 0; _i540 < _list539.size; ++_i540) { - TRowResult _elem541; // optional + TRowResult _elem541; // required _elem541 = new TRowResult(); _elem541.read(iprot); struct.success.add(_elem541); @@ -47703,7 +48711,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_id = true; + builder.append(present_id); + if (present_id) + builder.append(id); + + boolean present_nbRows = true; + builder.append(present_nbRows); + if (present_nbRows) + builder.append(nbRows); + + return builder.toHashCode(); } public int compareTo(scannerGetList_args other) { @@ -48223,7 +49243,24 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + boolean present_ia = true && (isSetIa()); + builder.append(present_ia); + if (present_ia) + builder.append(ia); + + return builder.toHashCode(); } public int compareTo(scannerGetList_result other) { @@ -48356,7 +49393,7 @@ public class Hbase { struct.success = new ArrayList(_list542.size); for (int _i543 = 0; _i543 < _list542.size; ++_i543) { - TRowResult _elem544; // optional + TRowResult _elem544; // required _elem544 = new TRowResult(); _elem544.read(iprot); struct.success.add(_elem544); @@ -48478,7 +49515,7 @@ public class Hbase { struct.success = new ArrayList(_list547.size); for (int _i548 = 0; _i548 < _list547.size; ++_i548) { - TRowResult _elem549; // optional + TRowResult _elem549; // required _elem549 = new TRowResult(); _elem549.read(iprot); struct.success.add(_elem549); @@ -48711,7 +49748,14 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_id 
= true; + builder.append(present_id); + if (present_id) + builder.append(id); + + return builder.toHashCode(); } public int compareTo(scannerClose_args other) { @@ -49119,7 +50163,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + boolean present_ia = true && (isSetIa()); + builder.append(present_ia); + if (present_ia) + builder.append(ia); + + return builder.toHashCode(); } public int compareTo(scannerClose_result other) { @@ -49322,46 +50378,28 @@ public class Hbase { } - public static class getRowOrBefore_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowOrBefore_args"); + public static class getRegionInfo_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRegionInfo_args"); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new getRowOrBefore_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new getRowOrBefore_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new getRegionInfo_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getRegionInfo_argsTupleSchemeFactory()); } /** - * name of table - */ - public ByteBuffer tableName; // required - /** * row key */ public ByteBuffer row; // required - /** - * column name - */ - public ByteBuffer family; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { /** - * name of table - */ - TABLE_NAME((short)1, "tableName"), - /** * row key */ - ROW((short)2, "row"), - /** - * column name - */ - FAMILY((short)3, "family"); + ROW((short)1, "row"); private static final Map byName = new HashMap(); @@ -49376,12 +50414,8 @@ public class Hbase { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // TABLE_NAME - return TABLE_NAME; - case 2: // ROW + case 1: // ROW return ROW; - case 3: // FAMILY - return FAMILY; default: return null; } @@ -49425,94 +50459,38 @@ public class Hbase { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Text"))); tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Text"))); - tmpMap.put(_Fields.FAMILY, new org.apache.thrift.meta_data.FieldMetaData("family", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Text"))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowOrBefore_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRegionInfo_args.class, metaDataMap); } - public getRowOrBefore_args() { + public getRegionInfo_args() { } - public getRowOrBefore_args( - ByteBuffer tableName, - ByteBuffer row, - ByteBuffer family) + public getRegionInfo_args( + ByteBuffer row) { this(); - this.tableName = tableName; this.row = row; - this.family = family; } /** * Performs a deep copy on other. */ - public getRowOrBefore_args(getRowOrBefore_args other) { - if (other.isSetTableName()) { - this.tableName = other.tableName; - } + public getRegionInfo_args(getRegionInfo_args other) { if (other.isSetRow()) { this.row = other.row; } - if (other.isSetFamily()) { - this.family = other.family; - } } - public getRowOrBefore_args deepCopy() { - return new getRowOrBefore_args(this); + public getRegionInfo_args deepCopy() { + return new getRegionInfo_args(this); } @Override public void clear() { - this.tableName = null; this.row = null; - this.family = null; - } - - /** - * name of table - */ - public byte[] getTableName() { - setTableName(org.apache.thrift.TBaseHelper.rightSize(tableName)); - return tableName == null ? null : tableName.array(); - } - - public ByteBuffer bufferForTableName() { - return tableName; - } - - /** - * name of table - */ - public getRowOrBefore_args setTableName(byte[] tableName) { - setTableName(tableName == null ? 
(ByteBuffer)null : ByteBuffer.wrap(tableName)); - return this; - } - - public getRowOrBefore_args setTableName(ByteBuffer tableName) { - this.tableName = tableName; - return this; - } - - public void unsetTableName() { - this.tableName = null; - } - - /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ - public boolean isSetTableName() { - return this.tableName != null; - } - - public void setTableNameIsSet(boolean value) { - if (!value) { - this.tableName = null; - } } /** @@ -49530,12 +50508,12 @@ public class Hbase { /** * row key */ - public getRowOrBefore_args setRow(byte[] row) { + public getRegionInfo_args setRow(byte[] row) { setRow(row == null ? (ByteBuffer)null : ByteBuffer.wrap(row)); return this; } - public getRowOrBefore_args setRow(ByteBuffer row) { + public getRegionInfo_args setRow(ByteBuffer row) { this.row = row; return this; } @@ -49555,56 +50533,8 @@ public class Hbase { } } - /** - * column name - */ - public byte[] getFamily() { - setFamily(org.apache.thrift.TBaseHelper.rightSize(family)); - return family == null ? null : family.array(); - } - - public ByteBuffer bufferForFamily() { - return family; - } - - /** - * column name - */ - public getRowOrBefore_args setFamily(byte[] family) { - setFamily(family == null ? (ByteBuffer)null : ByteBuffer.wrap(family)); - return this; - } - - public getRowOrBefore_args setFamily(ByteBuffer family) { - this.family = family; - return this; - } - - public void unsetFamily() { - this.family = null; - } - - /** Returns true if field family is set (has been assigned a value) and false otherwise */ - public boolean isSetFamily() { - return this.family != null; - } - - public void setFamilyIsSet(boolean value) { - if (!value) { - this.family = null; - } - } - public void setFieldValue(_Fields field, Object value) { switch (field) { - case TABLE_NAME: - if (value == null) { - unsetTableName(); - } else { - setTableName((ByteBuffer)value); - } - break; - case ROW: if (value == null) { unsetRow(); @@ -49613,28 +50543,14 @@ public class Hbase { } break; - case FAMILY: - if (value == null) { - unsetFamily(); - } else { - setFamily((ByteBuffer)value); - } - break; - } } public Object getFieldValue(_Fields field) { switch (field) { - case TABLE_NAME: - return getTableName(); - case ROW: return getRow(); - case FAMILY: - return getFamily(); - } throw new IllegalStateException(); } @@ -49646,12 +50562,8 @@ public class Hbase { } switch (field) { - case TABLE_NAME: - return isSetTableName(); case ROW: return isSetRow(); - case FAMILY: - return isSetFamily(); } throw new IllegalStateException(); } @@ -49660,24 +50572,15 @@ public class Hbase { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof getRowOrBefore_args) - return this.equals((getRowOrBefore_args)that); + if (that instanceof getRegionInfo_args) + return this.equals((getRegionInfo_args)that); return false; } - public boolean equals(getRowOrBefore_args that) { + public boolean equals(getRegionInfo_args that) { if (that == null) return false; - boolean this_present_tableName = true && this.isSetTableName(); - boolean that_present_tableName = true && that.isSetTableName(); - if (this_present_tableName || that_present_tableName) { - if (!(this_present_tableName && that_present_tableName)) - return false; - if (!this.tableName.equals(that.tableName)) - return false; - } - boolean this_present_row = true && this.isSetRow(); boolean that_present_row = true && that.isSetRow(); if (this_present_row || 
that_present_row) { @@ -49687,41 +50590,29 @@ public class Hbase { return false; } - boolean this_present_family = true && this.isSetFamily(); - boolean that_present_family = true && that.isSetFamily(); - if (this_present_family || that_present_family) { - if (!(this_present_family && that_present_family)) - return false; - if (!this.family.equals(that.family)) - return false; - } - return true; } @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_row = true && (isSetRow()); + builder.append(present_row); + if (present_row) + builder.append(row); + + return builder.toHashCode(); } - public int compareTo(getRowOrBefore_args other) { + public int compareTo(getRegionInfo_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - getRowOrBefore_args typedOther = (getRowOrBefore_args)other; + getRegionInfo_args typedOther = (getRegionInfo_args)other; - lastComparison = Boolean.valueOf(isSetTableName()).compareTo(typedOther.isSetTableName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTableName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, typedOther.tableName); - if (lastComparison != 0) { - return lastComparison; - } - } lastComparison = Boolean.valueOf(isSetRow()).compareTo(typedOther.isSetRow()); if (lastComparison != 0) { return lastComparison; @@ -49732,16 +50623,6 @@ public class Hbase { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetFamily()).compareTo(typedOther.isSetFamily()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetFamily()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.family, typedOther.family); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -49759,953 +50640,7 @@ public class Hbase { @Override public String toString() { - StringBuilder sb = new StringBuilder("getRowOrBefore_args("); - boolean first = true; - - sb.append("tableName:"); - if (this.tableName == null) { - sb.append("null"); - } else { - sb.append(this.tableName); - } - first = false; - if (!first) sb.append(", "); - sb.append("row:"); - if (this.row == null) { - sb.append("null"); - } else { - sb.append(this.row); - } - first = false; - if (!first) sb.append(", "); - sb.append("family:"); - if (this.family == null) { - sb.append("null"); - } else { - sb.append(this.family); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class getRowOrBefore_argsStandardSchemeFactory implements SchemeFactory { - public getRowOrBefore_argsStandardScheme getScheme() { - return new getRowOrBefore_argsStandardScheme(); - } - } - - private static class 
getRowOrBefore_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, getRowOrBefore_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // TABLE_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tableName = iprot.readBinary(); - struct.setTableNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // ROW - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.row = iprot.readBinary(); - struct.setRowIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // FAMILY - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.family = iprot.readBinary(); - struct.setFamilyIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - - // check for required fields of primitive type, which can't be checked in the validate method - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, getRowOrBefore_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.tableName != null) { - oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); - oprot.writeBinary(struct.tableName); - oprot.writeFieldEnd(); - } - if (struct.row != null) { - oprot.writeFieldBegin(ROW_FIELD_DESC); - oprot.writeBinary(struct.row); - oprot.writeFieldEnd(); - } - if (struct.family != null) { - oprot.writeFieldBegin(FAMILY_FIELD_DESC); - oprot.writeBinary(struct.family); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class getRowOrBefore_argsTupleSchemeFactory implements SchemeFactory { - public getRowOrBefore_argsTupleScheme getScheme() { - return new getRowOrBefore_argsTupleScheme(); - } - } - - private static class getRowOrBefore_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getRowOrBefore_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetTableName()) { - optionals.set(0); - } - if (struct.isSetRow()) { - optionals.set(1); - } - if (struct.isSetFamily()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); - if (struct.isSetTableName()) { - oprot.writeBinary(struct.tableName); - } - if (struct.isSetRow()) { - oprot.writeBinary(struct.row); - } - if (struct.isSetFamily()) { - oprot.writeBinary(struct.family); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getRowOrBefore_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); - if (incoming.get(0)) { - struct.tableName = iprot.readBinary(); - struct.setTableNameIsSet(true); - } - if (incoming.get(1)) { - struct.row = iprot.readBinary(); - struct.setRowIsSet(true); - } - if (incoming.get(2)) { - struct.family = iprot.readBinary(); - 
struct.setFamilyIsSet(true); - } - } - } - - } - - public static class getRowOrBefore_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRowOrBefore_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); - private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new getRowOrBefore_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new getRowOrBefore_resultTupleSchemeFactory()); - } - - public List success; // required - public IOError io; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"), - IO((short)1, "io"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - case 1: // IO - return IO; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCell.class)))); - tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRowOrBefore_result.class, metaDataMap); - } - - public getRowOrBefore_result() { - } - - public getRowOrBefore_result( - List success, - IOError io) - { - this(); - this.success = success; - this.io = io; - } - - /** - * Performs a deep copy on other. - */ - public getRowOrBefore_result(getRowOrBefore_result other) { - if (other.isSetSuccess()) { - List __this__success = new ArrayList(); - for (TCell other_element : other.success) { - __this__success.add(new TCell(other_element)); - } - this.success = __this__success; - } - if (other.isSetIo()) { - this.io = new IOError(other.io); - } - } - - public getRowOrBefore_result deepCopy() { - return new getRowOrBefore_result(this); - } - - @Override - public void clear() { - this.success = null; - this.io = null; - } - - public int getSuccessSize() { - return (this.success == null) ? 0 : this.success.size(); - } - - public java.util.Iterator getSuccessIterator() { - return (this.success == null) ? 
null : this.success.iterator(); - } - - public void addToSuccess(TCell elem) { - if (this.success == null) { - this.success = new ArrayList(); - } - this.success.add(elem); - } - - public List getSuccess() { - return this.success; - } - - public getRowOrBefore_result setSuccess(List success) { - this.success = success; - return this; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public IOError getIo() { - return this.io; - } - - public getRowOrBefore_result setIo(IOError io) { - this.io = io; - return this; - } - - public void unsetIo() { - this.io = null; - } - - /** Returns true if field io is set (has been assigned a value) and false otherwise */ - public boolean isSetIo() { - return this.io != null; - } - - public void setIoIsSet(boolean value) { - if (!value) { - this.io = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((List)value); - } - break; - - case IO: - if (value == null) { - unsetIo(); - } else { - setIo((IOError)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - case IO: - return getIo(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - case IO: - return isSetIo(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof getRowOrBefore_result) - return this.equals((getRowOrBefore_result)that); - return false; - } - - public boolean equals(getRowOrBefore_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - boolean this_present_io = true && this.isSetIo(); - boolean that_present_io = true && that.isSetIo(); - if (this_present_io || that_present_io) { - if (!(this_present_io && that_present_io)) - return false; - if (!this.io.equals(that.io)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - return 0; - } - - public int compareTo(getRowOrBefore_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - getRowOrBefore_result typedOther = (getRowOrBefore_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetIo()).compareTo(typedOther.isSetIo()); - 
if (lastComparison != 0) { - return lastComparison; - } - if (isSetIo()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.io, typedOther.io); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("getRowOrBefore_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - if (!first) sb.append(", "); - sb.append("io:"); - if (this.io == null) { - sb.append("null"); - } else { - sb.append(this.io); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class getRowOrBefore_resultStandardSchemeFactory implements SchemeFactory { - public getRowOrBefore_resultStandardScheme getScheme() { - return new getRowOrBefore_resultStandardScheme(); - } - } - - private static class getRowOrBefore_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, getRowOrBefore_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list550 = iprot.readListBegin(); - struct.success = new ArrayList(_list550.size); - for (int _i551 = 0; _i551 < _list550.size; ++_i551) - { - TCell _elem552; // optional - _elem552 = new TCell(); - _elem552.read(iprot); - struct.success.add(_elem552); - } - iprot.readListEnd(); - } - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 1: // IO - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.io = new IOError(); - struct.io.read(iprot); - struct.setIoIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - - // check for required fields of primitive type, which can't 
be checked in the validate method - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, getRowOrBefore_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TCell _iter553 : struct.success) - { - _iter553.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.io != null) { - oprot.writeFieldBegin(IO_FIELD_DESC); - struct.io.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class getRowOrBefore_resultTupleSchemeFactory implements SchemeFactory { - public getRowOrBefore_resultTupleScheme getScheme() { - return new getRowOrBefore_resultTupleScheme(); - } - } - - private static class getRowOrBefore_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getRowOrBefore_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - if (struct.isSetIo()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetSuccess()) { - { - oprot.writeI32(struct.success.size()); - for (TCell _iter554 : struct.success) - { - _iter554.write(oprot); - } - } - } - if (struct.isSetIo()) { - struct.io.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getRowOrBefore_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list555 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list555.size); - for (int _i556 = 0; _i556 < _list555.size; ++_i556) - { - TCell _elem557; // optional - _elem557 = new TCell(); - _elem557.read(iprot); - struct.success.add(_elem557); - } - } - struct.setSuccessIsSet(true); - } - if (incoming.get(1)) { - struct.io = new IOError(); - struct.io.read(iprot); - struct.setIoIsSet(true); - } - } - } - - } - - public static class getRegionInfo_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getRegionInfo_args"); - - private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new getRegionInfo_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new getRegionInfo_argsTupleSchemeFactory()); - } - - /** - * row key - */ - public ByteBuffer row; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - /** - * row key - */ - ROW((short)1, "row"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // ROW - return ROW; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Text"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getRegionInfo_args.class, metaDataMap); - } - - public getRegionInfo_args() { - } - - public getRegionInfo_args( - ByteBuffer row) - { - this(); - this.row = row; - } - - /** - * Performs a deep copy on other. - */ - public getRegionInfo_args(getRegionInfo_args other) { - if (other.isSetRow()) { - this.row = other.row; - } - } - - public getRegionInfo_args deepCopy() { - return new getRegionInfo_args(this); - } - - @Override - public void clear() { - this.row = null; - } - - /** - * row key - */ - public byte[] getRow() { - setRow(org.apache.thrift.TBaseHelper.rightSize(row)); - return row == null ? null : row.array(); - } - - public ByteBuffer bufferForRow() { - return row; - } - - /** - * row key - */ - public getRegionInfo_args setRow(byte[] row) { - setRow(row == null ? 
(ByteBuffer)null : ByteBuffer.wrap(row)); - return this; - } - - public getRegionInfo_args setRow(ByteBuffer row) { - this.row = row; - return this; - } - - public void unsetRow() { - this.row = null; - } - - /** Returns true if field row is set (has been assigned a value) and false otherwise */ - public boolean isSetRow() { - return this.row != null; - } - - public void setRowIsSet(boolean value) { - if (!value) { - this.row = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case ROW: - if (value == null) { - unsetRow(); - } else { - setRow((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case ROW: - return getRow(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case ROW: - return isSetRow(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof getRegionInfo_args) - return this.equals((getRegionInfo_args)that); - return false; - } - - public boolean equals(getRegionInfo_args that) { - if (that == null) - return false; - - boolean this_present_row = true && this.isSetRow(); - boolean that_present_row = true && that.isSetRow(); - if (this_present_row || that_present_row) { - if (!(this_present_row && that_present_row)) - return false; - if (!this.row.equals(that.row)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - return 0; - } - - public int compareTo(getRegionInfo_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - getRegionInfo_args typedOther = (getRegionInfo_args)other; - - lastComparison = Boolean.valueOf(isSetRow()).compareTo(typedOther.isSetRow()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetRow()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.row, typedOther.row); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("getRegionInfo_args("); + StringBuilder sb = new StringBuilder("getRegionInfo_args("); boolean first = true; sb.append("row:"); @@ -51079,7 +51014,19 @@ public class Hbase { @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_io = true && (isSetIo()); + builder.append(present_io); + if (present_io) + builder.append(io); + + return builder.toHashCode(); } public int compareTo(getRegionInfo_result other) { diff --git src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java 
src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java index 11e31e3..08ba49e 100644 --- src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java +++ src/main/java/org/apache/hadoop/hbase/thrift/generated/IOError.java @@ -6,6 +6,7 @@ */ package org.apache.hadoop.hbase.thrift.generated; +import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.thrift.scheme.IScheme; import org.apache.thrift.scheme.SchemeFactory; import org.apache.thrift.scheme.StandardScheme; @@ -227,7 +228,14 @@ public class IOError extends Exception implements org.apache.thrift.TBase, jav @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_value = true && (isSetValue()); + builder.append(present_value); + if (present_value) + builder.append(value); + + boolean present_timestamp = true; + builder.append(present_timestamp); + if (present_timestamp) + builder.append(timestamp); + + return builder.toHashCode(); } public int compareTo(TCell other) { diff --git src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java index ed251e8..d23cc70 100644 --- src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java +++ src/main/java/org/apache/hadoop/hbase/thrift/generated/TRegionInfo.java @@ -6,6 +6,7 @@ */ package org.apache.hadoop.hbase.thrift.generated; +import org.apache.commons.lang.builder.HashCodeBuilder; import org.apache.thrift.scheme.IScheme; import org.apache.thrift.scheme.SchemeFactory; import org.apache.thrift.scheme.StandardScheme; @@ -622,7 +623,44 @@ public class TRegionInfo implements org.apache.thrift.TBase, jav @Override public int hashCode() { - return 0; + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_startRow = true && (isSetStartRow()); + builder.append(present_startRow); + if (present_startRow) + builder.append(startRow); + + boolean present_stopRow = true && (isSetStopRow()); + builder.append(present_stopRow); + if (present_stopRow) + builder.append(stopRow); + + boolean present_timestamp = true && (isSetTimestamp()); + builder.append(present_timestamp); + if (present_timestamp) + builder.append(timestamp); + + boolean present_columns = true && (isSetColumns()); + builder.append(present_columns); + if (present_columns) + builder.append(columns); + + boolean present_caching = true && (isSetCaching()); + builder.append(present_caching); + if (present_caching) + builder.append(caching); + + boolean present_filterString = true && (isSetFilterString()); + builder.append(present_filterString); + if (present_filterString) + builder.append(filterString); + + return builder.toHashCode(); } public int compareTo(TScan other) { @@ -772,7 +805,7 @@ public class TScan implements org.apache.thrift.TBase, jav struct.columns = new ArrayList(_list18.size); for (int _i19 = 0; _i19 < _list18.size; ++_i19) { - ByteBuffer _elem20; // optional + ByteBuffer _elem20; // required _elem20 = iprot.readBinary(); struct.columns.add(_elem20); } @@ -944,7 +977,7 @@ public class TScan implements org.apache.thrift.TBase, jav struct.columns = new ArrayList(_list23.size); for (int _i24 = 0; _i24 < _list23.size; ++_i24) { - ByteBuffer _elem25; // optional + ByteBuffer _elem25; // required _elem25 = iprot.readBinary(); struct.columns.add(_elem25); } diff --git src/main/java/org/apache/hadoop/hbase/util/FSUtils.java src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index aebe5b0..bc6fc94 100644 --- 
src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -308,7 +308,7 @@ public abstract class FSUtils { FSUtils.setVersion(fs, rootdir, wait, retries); return; } - } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) + } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) <= 0) return; // version is deprecated require migration diff --git src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java index dee5301..0d1c5ea 100644 --- src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java +++ src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java @@ -52,7 +52,8 @@ public class RetryCounter { public void sleepUntilNextRetry() throws InterruptedException { int attempts = getAttemptTimes(); long sleepTime = (long) (retryIntervalMillis * Math.pow(2, attempts)); - LOG.info("Sleeping " + sleepTime + "ms before retry #" + attempts + "..."); + LOG.info("The " + attempts + " times to retry after sleeping " + sleepTime + + " ms"); timeUnit.sleep(sleepTime); } @@ -67,4 +68,4 @@ public class RetryCounter { public int getAttemptTimes() { return maxRetries-retriesRemaining+1; } -} +} \ No newline at end of file diff --git src/main/java/org/apache/hadoop/hbase/util/Writables.java src/main/java/org/apache/hadoop/hbase/util/Writables.java index 3d20723..ce9d598 100644 --- src/main/java/org/apache/hadoop/hbase/util/Writables.java +++ src/main/java/org/apache/hadoop/hbase/util/Writables.java @@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.util; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.migration.HRegionInfo090x; +import org.apache.hadoop.hbase.migration.HRegionInfo090x2; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.Writable; @@ -151,6 +152,17 @@ public class Writables { /** * @param bytes serialized bytes + * @return A HRegionInfo instance built out of passed bytes. + * @throws IOException e + */ + public static HRegionInfo090x2 getHRegionInfo90x2(final byte [] bytes) + throws IOException { + return (HRegionInfo090x2)getWritable(bytes, new HRegionInfo090x2()); + } + + + /** + * @param bytes serialized bytes * @return All the hregioninfos that are in the byte array. Keeps reading * till we hit the end. * @throws IOException e diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java index 233c3cc..a484d36 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java @@ -77,18 +77,10 @@ public class RecoverableZooKeeper { private int sessionTimeout; private String quorumServers; - // The metadata attached to each piece of data has the - // format: - // 1-byte constant - // 4-byte big-endian integer (length of next field) - // identifier corresponding uniquely to this process - // It is prepended to the data supplied by the user. 
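The comment block removed just above describes the framing that RecoverableZooKeeper prepends to every piece of znode data: a 1-byte magic constant, a 4-byte big-endian length, the per-process identifier, then the caller's payload. The hunks further down in this file operate on exactly those offsets. As a rough, self-contained illustration of that layout, here is a sketch in plain java.nio; the class and method names are hypothetical, and the real code works through org.apache.hadoop.hbase.util.Bytes rather than ByteBuffer.

import java.nio.ByteBuffer;

final class ZNodeFramingSketch {
  static final byte MAGIC = (byte) 0xFF;   // marker meaning "this data carries the prepended header"

  // Prepend: 1-byte magic, 4-byte big-endian id length, the process id, then the payload.
  static byte[] prependMetaData(byte[] id, byte[] payload) {
    ByteBuffer buf = ByteBuffer.allocate(1 + 4 + id.length + payload.length);
    buf.put(MAGIC);
    buf.putInt(id.length);   // ByteBuffer writes big-endian by default
    buf.put(id);
    buf.put(payload);
    return buf.array();
  }

  // Strip the header again; data that does not start with the magic byte is returned untouched,
  // which is how older, unframed znode data stays readable.
  static byte[] stripMetaData(byte[] stored) {
    if (stored == null || stored.length == 0 || stored[0] != MAGIC) {
      return stored;
    }
    ByteBuffer buf = ByteBuffer.wrap(stored, 1, stored.length - 1);
    int idLength = buf.getInt();
    buf.position(buf.position() + idLength);   // skip the process identifier
    byte[] payload = new byte[buf.remaining()];
    buf.get(payload);
    return payload;
  }
}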
- + private static final int ID_OFFSET = Bytes.SIZEOF_INT; // the magic number is to be backward compatible private static final byte MAGIC =(byte) 0XFF; - private static final int MAGIC_SIZE = Bytes.SIZEOF_BYTE; - private static final int ID_LENGTH_OFFSET = MAGIC_SIZE; - private static final int ID_LENGTH_SIZE = Bytes.SIZEOF_INT; + private static final int MAGIC_OFFSET = Bytes.SIZEOF_BYTE; public RecoverableZooKeeper(String quorumServers, int sessionTimeout, Watcher watcher, int maxRetries, int retryIntervalMillis) @@ -119,9 +111,12 @@ public class RecoverableZooKeeper { } /** - * delete is an idempotent operation. Retry before throwing exception. - * This function will not throw NoNodeException if the path does not - * exist. + * delete is an idempotent operation. Retry before throw out exception. + * This function will not throw out NoNodeException if the path is not existed + * @param path + * @param version + * @throws InterruptedException + * @throws KeeperException */ public void delete(String path, int version) throws InterruptedException, KeeperException { @@ -146,7 +141,12 @@ public class RecoverableZooKeeper { case CONNECTIONLOSS: case SESSIONEXPIRED: case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "delete"); + LOG.warn("Possibly transient ZooKeeper exception: " + e); + if (!retryCounter.shouldRetry()) { + LOG.error("ZooKeeper delete failed after " + + retryCounter.getMaxRetries() + " retries"); + throw e; + } break; default: @@ -160,8 +160,12 @@ public class RecoverableZooKeeper { } /** - * exists is an idempotent operation. Retry before throwing exception + * exists is an idempotent operation. Retry before throw out exception + * @param path + * @param watcher * @return A Stat instance + * @throws KeeperException + * @throws InterruptedException */ public Stat exists(String path, Watcher watcher) throws KeeperException, InterruptedException { @@ -174,7 +178,12 @@ public class RecoverableZooKeeper { case CONNECTIONLOSS: case SESSIONEXPIRED: case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "exists"); + LOG.warn("Possibly transient ZooKeeper exception: " + e); + if (!retryCounter.shouldRetry()) { + LOG.error("ZooKeeper exists failed after " + + retryCounter.getMaxRetries() + " retries"); + throw e; + } break; default: @@ -187,8 +196,12 @@ public class RecoverableZooKeeper { } /** - * exists is an idempotent operation. Retry before throwing exception + * exists is an idempotent operation. Retry before throw out exception + * @param path + * @param watch * @return A Stat instance + * @throws KeeperException + * @throws InterruptedException */ public Stat exists(String path, boolean watch) throws KeeperException, InterruptedException { @@ -201,7 +214,12 @@ public class RecoverableZooKeeper { case CONNECTIONLOSS: case SESSIONEXPIRED: case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "exists"); + LOG.warn("Possibly transient ZooKeeper exception: " + e); + if (!retryCounter.shouldRetry()) { + LOG.error("ZooKeeper exists failed after " + + retryCounter.getMaxRetries() + " retries"); + throw e; + } break; default: @@ -213,19 +231,13 @@ public class RecoverableZooKeeper { } } - private void retryOrThrow(RetryCounter retryCounter, KeeperException e, - String opName) throws KeeperException { - LOG.warn("Possibly transient ZooKeeper exception: " + e); - if (!retryCounter.shouldRetry()) { - LOG.error("ZooKeeper " + opName + " failed after " - + retryCounter.getMaxRetries() + " retries"); - throw e; - } - } - /** - * getChildren is an idempotent operation. 
Retry before throwing exception + * getChildren is an idempotent operation. Retry before throw out exception + * @param path + * @param watcher * @return List of children znodes + * @throws KeeperException + * @throws InterruptedException */ public List getChildren(String path, Watcher watcher) throws KeeperException, InterruptedException { @@ -238,7 +250,12 @@ public class RecoverableZooKeeper { case CONNECTIONLOSS: case SESSIONEXPIRED: case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "getChildren"); + LOG.warn("Possibly transient ZooKeeper exception: " + e); + if (!retryCounter.shouldRetry()) { + LOG.error("ZooKeeper getChildren failed after " + + retryCounter.getMaxRetries() + " retries"); + throw e; + } break; default: @@ -251,8 +268,12 @@ public class RecoverableZooKeeper { } /** - * getChildren is an idempotent operation. Retry before throwing exception + * getChildren is an idempotent operation. Retry before throw out exception + * @param path + * @param watch * @return List of children znodes + * @throws KeeperException + * @throws InterruptedException */ public List getChildren(String path, boolean watch) throws KeeperException, InterruptedException { @@ -265,7 +286,12 @@ public class RecoverableZooKeeper { case CONNECTIONLOSS: case SESSIONEXPIRED: case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "getChildren"); + LOG.warn("Possibly transient ZooKeeper exception: " + e); + if (!retryCounter.shouldRetry()) { + LOG.error("ZooKeeper getChildren failed after " + + retryCounter.getMaxRetries() + " retries"); + throw e; + } break; default: @@ -278,8 +304,13 @@ public class RecoverableZooKeeper { } /** - * getData is an idempotent operation. Retry before throwing exception + * getData is an idempotent operation. Retry before throw out exception + * @param path + * @param watcher + * @param stat * @return Data + * @throws KeeperException + * @throws InterruptedException */ public byte[] getData(String path, Watcher watcher, Stat stat) throws KeeperException, InterruptedException { @@ -293,7 +324,12 @@ public class RecoverableZooKeeper { case CONNECTIONLOSS: case SESSIONEXPIRED: case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "getData"); + LOG.warn("Possibly transient ZooKeeper exception: " + e); + if (!retryCounter.shouldRetry()) { + LOG.error("ZooKeeper getData failed after " + + retryCounter.getMaxRetries() + " retries"); + throw e; + } break; default: @@ -306,8 +342,13 @@ public class RecoverableZooKeeper { } /** - * getData is an idemnpotent operation. Retry before throwing exception + * getData is an idemnpotent operation. Retry before throw out exception + * @param path + * @param watch + * @param stat * @return Data + * @throws KeeperException + * @throws InterruptedException */ public byte[] getData(String path, boolean watch, Stat stat) throws KeeperException, InterruptedException { @@ -321,7 +362,12 @@ public class RecoverableZooKeeper { case CONNECTIONLOSS: case SESSIONEXPIRED: case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "getData"); + LOG.warn("Possibly transient ZooKeeper exception: " + e); + if (!retryCounter.shouldRetry()) { + LOG.error("ZooKeeper getData failed after " + + retryCounter.getMaxRetries() + " retries"); + throw e; + } break; default: @@ -337,7 +383,12 @@ public class RecoverableZooKeeper { * setData is NOT an idempotent operation. 
Retry may cause BadVersion Exception * Adding an identifier field into the data to check whether * badversion is caused by the result of previous correctly setData + * @param path + * @param data + * @param version * @return Stat instance + * @throws KeeperException + * @throws InterruptedException */ public Stat setData(String path, byte[] data, int version) throws KeeperException, InterruptedException { @@ -351,28 +402,33 @@ public class RecoverableZooKeeper { case CONNECTIONLOSS: case SESSIONEXPIRED: case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "setData"); + LOG.warn("Possibly transient ZooKeeper exception: " + e); + if (!retryCounter.shouldRetry()) { + LOG.error("ZooKeeper setData failed after " + + retryCounter.getMaxRetries() + " retries"); + throw e; + } break; case BADVERSION: // try to verify whether the previous setData success or not try{ Stat stat = new Stat(); byte[] revData = zk.getData(path, false, stat); - int idLength = Bytes.toInt(revData, ID_LENGTH_SIZE); - int dataLength = revData.length-ID_LENGTH_SIZE-idLength; - int dataOffset = ID_LENGTH_SIZE+idLength; + int idLength = Bytes.toInt(revData, ID_OFFSET); + int dataLength = revData.length-ID_OFFSET-idLength; + int dataOffset = ID_OFFSET+idLength; - if(Bytes.compareTo(revData, ID_LENGTH_SIZE, id.length, + if(Bytes.compareTo(revData, ID_OFFSET, id.length, revData, dataOffset, dataLength) == 0) { // the bad version is caused by previous successful setData return stat; } } catch(KeeperException keeperException){ - // the ZK is not reliable at this moment. just throwing exception + // the ZK is not reliable at this moment. just throw out exception throw keeperException; } - // throw other exceptions and verified bad version exceptions + // throw out other exceptions and verified bad version exceptions default: throw e; } @@ -385,8 +441,8 @@ public class RecoverableZooKeeper { /** *

* NONSEQUENTIAL create is idempotent operation. - * Retry before throwing exceptions. - * But this function will not throw the NodeExist exception back to the + * Retry before throw out exceptions. + * But this function will not throw out the NodeExist exception back to the * application. *

*

@@ -395,7 +451,13 @@ public class RecoverableZooKeeper { * or not. *

* + * @param path + * @param data + * @param acl + * @param createMode * @return Path + * @throws KeeperException + * @throws InterruptedException */ public String create(String path, byte[] data, List acl, CreateMode createMode) @@ -448,7 +510,12 @@ public class RecoverableZooKeeper { case CONNECTIONLOSS: case SESSIONEXPIRED: case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "create"); + LOG.warn("Possibly transient ZooKeeper exception: " + e); + if (!retryCounter.shouldRetry()) { + LOG.error("ZooKeeper create failed after " + + retryCounter.getMaxRetries() + " retries"); + throw e; + } break; default: @@ -483,7 +550,12 @@ public class RecoverableZooKeeper { case CONNECTIONLOSS: case SESSIONEXPIRED: case OPERATIONTIMEOUT: - retryOrThrow(retryCounter, e, "create"); + LOG.warn("Possibly transient ZooKeeper exception: " + e); + if (!retryCounter.shouldRetry()) { + LOG.error("ZooKeeper create failed after " + + retryCounter.getMaxRetries() + " retries"); + throw e; + } break; default: @@ -524,9 +596,9 @@ public class RecoverableZooKeeper { return data; } - int idLength = Bytes.toInt(data, ID_LENGTH_OFFSET); - int dataLength = data.length-MAGIC_SIZE-ID_LENGTH_SIZE-idLength; - int dataOffset = MAGIC_SIZE+ID_LENGTH_SIZE+idLength; + int idLength = Bytes.toInt(data, MAGIC_OFFSET); + int dataLength = data.length-MAGIC_OFFSET-ID_OFFSET-idLength; + int dataOffset = MAGIC_OFFSET+ID_OFFSET+idLength; byte[] newData = new byte[dataLength]; System.arraycopy(data, dataOffset, newData, 0, dataLength); @@ -540,7 +612,7 @@ public class RecoverableZooKeeper { return data; } - byte[] newData = new byte[MAGIC_SIZE+ID_LENGTH_SIZE+id.length+data.length]; + byte[] newData = new byte[MAGIC_OFFSET+ID_OFFSET+id.length+data.length]; int pos = 0; pos = Bytes.putByte(newData, pos, MAGIC); pos = Bytes.putInt(newData, pos, id.length); diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java index b6fef32..cb1539e 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java @@ -163,8 +163,7 @@ public class ZKConfig { } // Special case for 'hbase.cluster.distributed' property being 'true' if (key.startsWith("server.")) { - if (conf.get(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED). - equals(HConstants.CLUSTER_IS_DISTRIBUTED) + if (conf.get(HConstants.CLUSTER_DISTRIBUTED).equals(HConstants.CLUSTER_IS_DISTRIBUTED) && value.startsWith(HConstants.LOCALHOST)) { String msg = "The server in zoo.cfg cannot be set to localhost " + "in a fully-distributed setup because it won't be reachable. 
" + diff --git src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd index c2df60d..de4fff1 100644 --- src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd +++ src/main/resources/org/apache/hadoop/hbase/rest/XMLSchema.xsd @@ -166,13 +166,6 @@ - - - - - - - diff --git src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto index 46e275d..2b032f7 100644 --- src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto +++ src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto @@ -26,13 +26,6 @@ message StorageClusterStatus { optional int32 storefileSizeMB = 4; optional int32 memstoreSizeMB = 5; optional int32 storefileIndexSizeMB = 6; - optional int64 readRequestsCount = 7; - optional int64 writeRequestsCount = 8; - optional int32 rootIndexSizeKB = 9; - optional int32 totalStaticIndexSizeKB = 10; - optional int32 totalStaticBloomSizeKB = 11; - optional int64 totalCompactingKVs = 12; - optional int64 currentCompactedKVs = 13; } message Node { required string name = 1; // name:port diff --git src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift index f698a6c..c4c783d 100644 --- src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift +++ src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift @@ -858,22 +858,6 @@ service Hbase { ) throws (1:IOError io, 2:IllegalArgument ia) /** - * Get the row just before the specified one. - * - * @return value for specified row/column - */ - list getRowOrBefore( - /** name of table */ - 1:Text tableName, - - /** row key */ - 2:Text row, - - /** column name */ - 3:Text family - ) throws (1:IOError io) - - /** * Get the regininfo for the specified row. It scans * the metatable to find region's start and end keys. 
* diff --git src/test/data/generate-hbase-2600-root-in-tmp.sh src/test/data/generate-hbase-2600-root-in-tmp.sh new file mode 100644 index 0000000..f886e12 --- /dev/null +++ src/test/data/generate-hbase-2600-root-in-tmp.sh @@ -0,0 +1,14 @@ +#!/bin/sh +set -ev +echo "create 't1','c1'" +NUM_PUTS=1000 +NUM_SPLITS=5 +FOO="dfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsd dfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsd dfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsd dfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsd dfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsd dfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsd dfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsd dfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsddfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsd dfjsgkljsdfklgjklsdjgkldjsfgjsdfklgjklsd" +for i in `seq 1 $NUM_PUTS` + do echo "put 't1','r${i}','c1','${FOO}'" +done +for i in `seq 1 $NUM_SPLITS` + do echo "split 't1'" + echo "list" + echo scan "'.META.'" +done diff --git src/test/data/hbase-2600-root.dir.tgz src/test/data/hbase-2600-root.dir.tgz new file mode 100644 index 0000000..f606141 Binary files /dev/null and src/test/data/hbase-2600-root.dir.tgz differ diff --git src/test/data/hbase-4388-root.dir.tgz src/test/data/hbase-4388-root.dir.tgz deleted file mode 100644 index da2244e..0000000 Binary files src/test/data/hbase-4388-root.dir.tgz and /dev/null differ diff --git src/test/java/org/apache/hadoop/hbase/TestKeyValue.java 
src/test/java/org/apache/hadoop/hbase/TestKeyValue.java index fae6902..1d024e2 100644 --- src/test/java/org/apache/hadoop/hbase/TestKeyValue.java +++ src/test/java/org/apache/hadoop/hbase/TestKeyValue.java @@ -131,12 +131,14 @@ public class TestKeyValue extends TestCase { public void testMoreComparisons() throws Exception { // Root compares long now = System.currentTimeMillis(); - KeyValue a = new KeyValue(Bytes.toBytes(".META.,,99999999999999"), now); - KeyValue b = new KeyValue(Bytes.toBytes(".META.,,1"), now); + String lastMeta = ".META.\",,99999999999999"; + String firstMeta = ".META.\",,1"; + KeyValue a = new KeyValue(Bytes.toBytes(lastMeta), now); + KeyValue b = new KeyValue(Bytes.toBytes(firstMeta), now); KVComparator c = new KeyValue.RootComparator(); assertTrue(c.compare(b, a) < 0); - KeyValue aa = new KeyValue(Bytes.toBytes(".META.,,1"), now); - KeyValue bb = new KeyValue(Bytes.toBytes(".META.,,1"), + KeyValue aa = new KeyValue(Bytes.toBytes(firstMeta), now); + KeyValue bb = new KeyValue(Bytes.toBytes(firstMeta), Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1235943454602L, (byte[])null); assertTrue(c.compare(aa, bb) < 0); diff --git src/test/java/org/apache/hadoop/hbase/catalog/TestMetaUpdate.java src/test/java/org/apache/hadoop/hbase/catalog/TestMetaUpdate.java new file mode 100644 index 0000000..2b1197c --- /dev/null +++ src/test/java/org/apache/hadoop/hbase/catalog/TestMetaUpdate.java @@ -0,0 +1,371 @@ +/** + * Copyright 2010 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.catalog; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import junit.framework.Assert; +import junit.framework.AssertionFailedError; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.FsShell; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.migration.HRegionInfo090x2; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.Writables; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static junit.framework.Assert.assertEquals; + +/** + * Test migration that removes HTableDescriptor from HRegionInfo moving the + * meta version from {@link org.apache.hadoop.hbase.HConstants#META_VERSION} + * to {@link org.apache.hadoop.hbase.HConstants#META_VERSION2}. + */ +@Category(MediumTests.class) +public class TestMetaUpdate { + static final Log LOG = LogFactory.getLog(TestMetaUpdate.class); + private final static HBaseTestingUtility TEST_UTIL = new MigrateTestUtil(); + private final static String TESTTABLE = "t1"; + private final static int ROWCOUNT = 1000; + + public static class MigrateTestUtil extends HBaseTestingUtility { + @Override + public Path createRootDir() throws IOException { + return null; + } + } + + @BeforeClass + public static void startCluster() throws Exception { + // Start up our mini cluster on top of an 0.90 root.dir that has data from + // a 0.90 hbase run -- it has a table with 100 rows in it -- and see if + // we can migrate from 0.90. + TEST_UTIL.startMiniZKCluster(); + TEST_UTIL.startMiniDFSCluster(1); + Path testdir = TEST_UTIL.getDataTestDir("TestMetaUpdate"); + // Untar our test dir. + File untar = untar(new File(testdir.toString())); + // Now copy the untar up into hdfs so when we start hbase, we'll run from it. + Configuration conf = TEST_UTIL.getConfiguration(); + FsShell shell = new FsShell(conf); + FileSystem fs = FileSystem.get(conf); + // Minihbase roots itself in user home directory up in minidfs. + Path homedir = fs.getHomeDirectory(); + doFsCommand(shell, + new String[]{"-put", untar.toURI().toString(), homedir.toString()}); + // See whats in minihdfs. + doFsCommand(shell, new String[]{"-lsr", "/"}); + Path hbaseRootdir = new Path(fs.makeQualified(homedir), "hbase"); + conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString()); + + TEST_UTIL.startMiniHBaseCluster(1, 3); + + // Assert we are running against the copied-up filesystem. The copied-up + // rootdir should have had a table named 'TestTable' in it. Assert it + // present. 
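+ // In this setup the copied-up rootdir holds table 't1' (TESTTABLE) with ROWCOUNT (1000) rows, + // apparently built with src/test/data/generate-hbase-2600-root-in-tmp.sh added in this patch.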
+ + + HTable t = new HTable(TEST_UTIL.getConfiguration(), TESTTABLE); + ResultScanner scanner = t.getScanner(new Scan()); + int count = 0; + while (scanner.next() != null) { + count++; + } + + Assert.assertEquals(ROWCOUNT, count); + scanner.close(); + t.close(); + } + + private static File untar(final File testdir) throws IOException { + // Find the src data under src/test/data + final String datafile = "hbase-2600-root.dir"; + String srcTarFile = + System.getProperty("project.build.testSourceDirectory", "src/test") + + File.separator + "data" + File.separator + datafile + ".tgz"; + File homedir = new File(testdir.toString()); + File tgtUntarDir = new File(homedir, datafile); + if (tgtUntarDir.exists()) { + if (!FileUtil.fullyDelete(tgtUntarDir)) { + throw new IOException("Failed delete of " + tgtUntarDir.toString()); + } + } + LOG.info("Untarring " + srcTarFile + " into " + homedir.toString()); + FileUtil.unTar(new File(srcTarFile), homedir); + Assert.assertTrue(tgtUntarDir.exists()); + return tgtUntarDir; + } + + private static void doFsCommand(final FsShell shell, final String[] args) + throws Exception { + // Run the 'put' command. + int errcode = shell.run(args); + if (errcode != 0) throw new IOException("Failed put; errcode=" + errcode); + } + + /** + * @throws java.lang.Exception shutdownminicluster throws exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + /** + * This test assumes a master crash/failure after the meta and root update but + * before the filesystem version is updated. + * + * @throws Exception + */ + @Test + public void testMasterCrashWithoutVersionUpdate() throws Exception { + + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + MasterFileSystem masterFileSystem = master.getMasterFileSystem(); + //Simulate an old version + FSUtils.setVersion(masterFileSystem.getFileSystem(), + masterFileSystem.getRootDir(), + HConstants.FILE_SYSTEM_VERSION_INT - 1, + 10); + MetaMigratev2 metaMigratev2 = new MetaMigratev2(master); + + // There are no new migrations so we shouldn't have found any + List htds = metaMigratev2.updateAndOnlineRoot(); + assertEquals(0, htds.size()); + + htds = metaMigratev2.updateAndOnlineMeta(); + assertEquals(0, htds.size()); + + String versionStr = FSUtils.getVersion(masterFileSystem.getFileSystem(), + masterFileSystem.getRootDir()); + assertEquals(HConstants.FILE_SYSTEM_VERSION, versionStr); + MetaReader.fullScanMetaAndPrint(master.getCatalogTracker()); + } + + + /** + * This test assumes a master crash/failure in the middle of a meta ugprade + * before the filesystem version is updated. + * + * @throws Exception + */ + @Test + public void testMasterCrashWhileEditingMeta() throws Exception { + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + Configuration configuration = master.getConfiguration(); + MasterFileSystem masterFileSystem = master.getMasterFileSystem(); + + //Simulate an old version + FSUtils.setVersion(masterFileSystem.getFileSystem(), + masterFileSystem.getRootDir(), + HConstants.FILE_SYSTEM_VERSION_INT - 1, + 10); + + //Remove the last couple entries to simulate an old table. 
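+ // The two .META. rows deleted and re-added below are rewritten in the older HRegionInfo090x2 + // format, leaving the catalog part-migrated, as if the master had died while rewriting .META., + // so updateAndOnlineMeta() should report exactly those two regions.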
+ HTable mTable = new HTable(configuration, HConstants.META_TABLE_NAME); + MetaReader.fullScanMetaAndPrint(master.getCatalogTracker()); + + String lastRow = "t1!,r941,1329264314014.1f87047e8ed7504c7dd439ebc732e633."; + byte[] lastRowBytes = Bytes.toBytes(lastRow); + Delete delete = new Delete(lastRowBytes); + MetaEditor.deleteMetaTable(master.getCatalogTracker(), delete); + + lastRow = "t1\",,1329264314014.c15ddd3b8e8504a4228357d5dc8d243e."; + lastRowBytes = Bytes.toBytes(lastRow); + delete = new Delete(lastRowBytes); + MetaEditor.deleteMetaTable(master.getCatalogTracker(), delete); + + byte[] tableName = Bytes.toBytes("t1"); + byte[] startRow = Bytes.toBytes("r941"); + byte[] endRow = Bytes.toBytes("r971"); + + HRegionInfo090x2 hri = new HRegionInfo090x2(tableName, + startRow, + endRow, + false); + Put p1 = new Put(hri.getRegionName()); + p1.add(HConstants.CATALOG_FAMILY, + HConstants.REGIONINFO_QUALIFIER, + Writables.getBytes(hri)); + MetaEditor.putToMetaTable(master.getCatalogTracker(), p1); + + hri = new HRegionInfo090x2(tableName, + endRow, + HConstants.EMPTY_BYTE_ARRAY, + false); + Put p2 = new Put(hri.getRegionName()); + p2.add(HConstants.CATALOG_FAMILY, + HConstants.REGIONINFO_QUALIFIER, + Writables.getBytes(hri)); + MetaEditor.putToMetaTable(master.getCatalogTracker(), p2); + MetaMigratev2 metaMigratev2 = new MetaMigratev2(master); + + + // There are no new migrations so we shouldn't have found any + List htds = metaMigratev2.updateAndOnlineRoot(); + assertEquals(0, htds.size()); + + htds = metaMigratev2.updateAndOnlineMeta(); + assertEquals(2, htds.size()); + + String versionStr = FSUtils.getVersion(masterFileSystem.getFileSystem(), + masterFileSystem.getRootDir()); + assertEquals(HConstants.FILE_SYSTEM_VERSION, versionStr); + + MetaReader.fullScanMetaAndPrint(master.getCatalogTracker()); + } + + + /** + * @param c + * @param htd + * @param family + * @param numRegions + * @return + * @throws IOException + * @deprecated Just for testing migration of meta from 0.90 to 0.92... will be + * removed thereafter + */ + public int createMultiRegionsWithLegacyHRI(final Configuration c, + final HTableDescriptor htd, + final byte[] family, + int numRegions) + throws IOException { + if (numRegions < 3) throw new IOException("Must create at least 3 regions"); + byte[] startKey = Bytes.toBytes("aaaaa"); + byte[] endKey = Bytes.toBytes("zzzzz"); + byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3); + byte[][] regionStartKeys = new byte[splitKeys.length + 1][]; + for (int i = 0; i < splitKeys.length; i++) { + regionStartKeys[i + 1] = splitKeys[i]; + } + regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY; + return createMultiRegionsWithLegacyHRI(c, htd, family, regionStartKeys); + } + + /** + * @param c + * @param htd + * @param columnFamily + * @param startKeys + * @return + * @throws IOException + * @deprecated Just for testing migration of meta from 0.92 to 0.92v2... 
+ * will be removed thereafter + */ + public int createMultiRegionsWithLegacyHRI(final Configuration c, + final HTableDescriptor htd, + final byte[] columnFamily, + byte[][] startKeys) + throws IOException { + Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR); + HTable meta = new HTable(c, HConstants.META_TABLE_NAME); + if (!htd.hasFamily(columnFamily)) { + HColumnDescriptor hcd = new HColumnDescriptor(columnFamily); + htd.addFamily(hcd); + } + List newRegions + = new ArrayList(startKeys.length); + int count = 0; + for (int i = 0; i < startKeys.length; i++) { + int j = (i + 1) % startKeys.length; + HRegionInfo090x2 hri = new HRegionInfo090x2(htd.getName(), + startKeys[i], startKeys[j]); + Put put = new Put(hri.getRegionName()); + put.setWriteToWAL(false); + put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, + Writables.getBytes(hri)); + meta.put(put); + LOG.info("createMultiRegions: PUT inserted " + hri.toString()); + + newRegions.add(hri); + count++; + } + meta.close(); + return count; + } + + int createMultiRegionsWithNewHRI(final Configuration c, + final HTableDescriptor htd, + final byte[] family, + int numRegions) throws IOException { + if (numRegions < 3) throw new IOException("Must create at least 3 regions"); + byte[] startKey = Bytes.toBytes("aaaaa"); + byte[] endKey = Bytes.toBytes("zzzzz"); + byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3); + byte[][] regionStartKeys = new byte[splitKeys.length + 1][]; + for (int i = 0; i < splitKeys.length; i++) { + regionStartKeys[i + 1] = splitKeys[i]; + } + regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY; + return createMultiRegionsWithNewHRI(c, htd, family, regionStartKeys); + } + + int createMultiRegionsWithNewHRI(final Configuration c, final HTableDescriptor htd, + final byte[] columnFamily, byte[][] endKeys) + throws IOException { + Arrays.sort(endKeys, Bytes.BYTES_COMPARATOR); + HTable meta = new HTable(c, HConstants.META_TABLE_NAME); + if (!htd.hasFamily(columnFamily)) { + HColumnDescriptor hcd = new HColumnDescriptor(columnFamily); + htd.addFamily(hcd); + } + List newRegions + = new ArrayList(endKeys.length); + int count = 0; + for (int i = 0; i < endKeys.length; i++) { + int j = (i + 1) % endKeys.length; + HRegionInfo hri = new HRegionInfo(htd.getName(), + endKeys[i], endKeys[j]); + Put put = new Put(hri.getRegionName()); + put.setWriteToWAL(false); + put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, + Writables.getBytes(hri)); + meta.put(put); + LOG.info("createMultiRegions: PUT inserted " + hri.toString()); + + newRegions.add(hri); + count++; + } + meta.close(); + return count; + } + + @org.junit.Rule + public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu = + new org.apache.hadoop.hbase.ResourceCheckerJUnitRule(); +} \ No newline at end of file diff --git src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index f7430ee..727891b 100644 --- src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -4037,56 +4037,6 @@ public class TestFromClientSide { } } - @Test - public void testGetClosestRowBefore() throws IOException { - final byte [] tableAname = Bytes.toBytes("testGetClosestRowBefore"); - final byte [] row = Bytes.toBytes("row"); - - - byte[] firstRow = Bytes.toBytes("ro"); - byte[] beforeFirstRow = Bytes.toBytes("rn"); - byte[] beforeSecondRow = Bytes.toBytes("rov"); - - HTable table = 
TEST_UTIL.createTable(tableAname, - new byte [][] {HConstants.CATALOG_FAMILY, Bytes.toBytes("info2")}); - Put put = new Put(firstRow); - Put put2 = new Put(row); - byte[] zero = new byte[]{0}; - byte[] one = new byte[]{1}; - - put.add(HConstants.CATALOG_FAMILY, null, zero); - put2.add(HConstants.CATALOG_FAMILY, null, one); - - table.put(put); - table.put(put2); - - Result result = null; - - // Test before first that null is returned - result = table.getRowOrBefore(beforeFirstRow, HConstants.CATALOG_FAMILY); - assertTrue(result == null); - - // Test at first that first is returned - result = table.getRowOrBefore(firstRow, HConstants.CATALOG_FAMILY); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), zero)); - - // Test in between first and second that first is returned - result = table.getRowOrBefore(beforeSecondRow, HConstants.CATALOG_FAMILY); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), zero)); - - // Test at second make sure second is returned - result = table.getRowOrBefore(row, HConstants.CATALOG_FAMILY); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one)); - - // Test after second, make sure second is returned - result = table.getRowOrBefore(Bytes.add(row,one), HConstants.CATALOG_FAMILY); - assertTrue(result.containsColumn(HConstants.CATALOG_FAMILY, null)); - assertTrue(Bytes.equals(result.getValue(HConstants.CATALOG_FAMILY, null), one)); - } - /** * For HBASE-2156 * @throws Exception diff --git src/test/java/org/apache/hadoop/hbase/client/TestMetaMigrationRemovingHTD.java src/test/java/org/apache/hadoop/hbase/client/TestMetaMigrationRemovingHTD.java deleted file mode 100644 index d1c15af..0000000 --- src/test/java/org/apache/hadoop/hbase/client/TestMetaMigrationRemovingHTD.java +++ /dev/null @@ -1,363 +0,0 @@ -/** - * Copyright 2010 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.client; - -import static org.junit.Assert.*; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Set; - -import junit.framework.Assert; -import junit.framework.AssertionFailedError; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.FsShell; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; -import org.apache.hadoop.hbase.catalog.CatalogTracker; -import org.apache.hadoop.hbase.catalog.MetaMigrationRemovingHTD; -import org.apache.hadoop.hbase.catalog.MetaReader; -import org.apache.hadoop.hbase.migration.HRegionInfo090x; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Writables; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** - * Test migration that removes HTableDescriptor from HRegionInfo moving the - * meta version from no version to {@link MetaReader#META_VERSION}. - */ -@Category(MediumTests.class) -public class TestMetaMigrationRemovingHTD { - static final Log LOG = LogFactory.getLog(TestMetaMigrationRemovingHTD.class); - private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private final static String TESTTABLE = "TestTable"; - private final static int ROWCOUNT = 100; - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - // Start up our mini cluster on top of an 0.90 root.dir that has data from - // a 0.90 hbase run -- it has a table with 100 rows in it -- and see if - // we can migrate from 0.90. - TEST_UTIL.startMiniZKCluster(); - TEST_UTIL.startMiniDFSCluster(1); - Path testdir = TEST_UTIL.getDataTestDir("TestMetaMigrationRemovingHTD"); - // Untar our test dir. - File untar = untar(new File(testdir.toString())); - // Now copy the untar up into hdfs so when we start hbase, we'll run from it. - Configuration conf = TEST_UTIL.getConfiguration(); - FsShell shell = new FsShell(conf); - FileSystem fs = FileSystem.get(conf); - // find where hbase will root itself, so we can copy filesystem there - Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath(); - doFsCommand(shell, - new String [] {"-put", untar.toURI().toString(), hbaseRootDir.toString()}); - // See whats in minihdfs. - doFsCommand(shell, new String [] {"-lsr", "/"}); - TEST_UTIL.startMiniHBaseCluster(1, 1); - // Assert we are running against the copied-up filesystem. The copied-up - // rootdir should have had a table named 'TestTable' in it. Assert it - // present. - HTable t = new HTable(TEST_UTIL.getConfiguration(), TESTTABLE); - ResultScanner scanner = t.getScanner(new Scan()); - int count = 0; - while (scanner.next() != null) { - count++; - } - // Assert that we find all 100 rows that are in the data we loaded. If - // so then we must have migrated it from 0.90 to 0.92. 
- Assert.assertEquals(ROWCOUNT, count); - scanner.close(); - t.close(); - } - - private static File untar(final File testdir) throws IOException { - // Find the src data under src/test/data - final String datafile = "hbase-4388-root.dir"; - String srcTarFile = - System.getProperty("project.build.testSourceDirectory", "src/test") + - File.separator + "data" + File.separator + datafile + ".tgz"; - File homedir = new File(testdir.toString()); - File tgtUntarDir = new File(homedir, datafile); - if (tgtUntarDir.exists()) { - if (!FileUtil.fullyDelete(tgtUntarDir)) { - throw new IOException("Failed delete of " + tgtUntarDir.toString()); - } - } - LOG.info("Untarring " + srcTarFile + " into " + homedir.toString()); - FileUtil.unTar(new File(srcTarFile), homedir); - Assert.assertTrue(tgtUntarDir.exists()); - return tgtUntarDir; - } - - private static void doFsCommand(final FsShell shell, final String [] args) - throws Exception { - // Run the 'put' command. - int errcode = shell.run(args); - if (errcode != 0) throw new IOException("Failed put; errcode=" + errcode); - } - - /** - * @throws java.lang.Exception - */ - @AfterClass - public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @Test - public void testMetaUpdatedFlagInROOT() throws Exception { - boolean metaUpdated = MetaMigrationRemovingHTD. - isMetaHRIUpdated(TEST_UTIL.getMiniHBaseCluster().getMaster()); - assertEquals(true, metaUpdated); - } - - @Test - public void testMetaMigration() throws Exception { - LOG.info("Starting testMetaWithLegacyHRI"); - final byte [] FAMILY = Bytes.toBytes("family"); - HTableDescriptor htd = new HTableDescriptor("testMetaMigration"); - HColumnDescriptor hcd = new HColumnDescriptor(FAMILY); - htd.addFamily(hcd); - Configuration conf = TEST_UTIL.getConfiguration(); - createMultiRegionsWithLegacyHRI(conf, htd, FAMILY, - new byte[][]{ - HConstants.EMPTY_START_ROW, - Bytes.toBytes("region_a"), - Bytes.toBytes("region_b")}); - CatalogTracker ct = - TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker(); - // Erase the current version of root meta for this test. - undoVersionInMeta(); - MetaReader.fullScanMetaAndPrint(ct); - LOG.info("Meta Print completed.testUpdatesOnMetaWithLegacyHRI"); - - Set htds = - MetaMigrationRemovingHTD.updateMetaWithNewRegionInfo( - TEST_UTIL.getHBaseCluster().getMaster()); - MetaReader.fullScanMetaAndPrint(ct); - // Should be one entry only and it should be for the table we just added. - assertEquals(1, htds.size()); - assertTrue(htds.contains(htd)); - // Assert that the flag in ROOT is updated to reflect the correct status - boolean metaUpdated = - MetaMigrationRemovingHTD.isMetaHRIUpdated( - TEST_UTIL.getMiniHBaseCluster().getMaster()); - assertEquals(true, metaUpdated); - } - - /** - * This test assumes a master crash/failure during the meta migration process - * and attempts to continue the meta migration process when a new master takes over. - * When a master dies during the meta migration we will have some rows of - * META.CatalogFamily updated with new HRI, (i.e HRI with out HTD) and some - * still hanging with legacy HRI. (i.e HRI with HTD). When the backup master/ or - * fresh start of master attempts the migration it will encouter some rows of META - * already updated with new HRI and some still legacy. This test will simulate this - * scenario and validates that the migration process can safely skip the updated - * rows and migrate any pending rows at startup. 
- * @throws Exception - */ - @Test - public void testMasterCrashDuringMetaMigration() throws Exception { - final byte[] FAMILY = Bytes.toBytes("family"); - HTableDescriptor htd = new HTableDescriptor("testMasterCrashDuringMetaMigration"); - HColumnDescriptor hcd = new HColumnDescriptor(FAMILY); - htd.addFamily(hcd); - Configuration conf = TEST_UTIL.getConfiguration(); - // Create 10 New regions. - createMultiRegionsWithNewHRI(conf, htd, FAMILY, 10); - // Create 10 Legacy regions. - createMultiRegionsWithLegacyHRI(conf, htd, FAMILY, 10); - CatalogTracker ct = - TEST_UTIL.getMiniHBaseCluster().getMaster().getCatalogTracker(); - // Erase the current version of root meta for this test. - undoVersionInMeta(); - MetaMigrationRemovingHTD.updateRootWithMetaMigrationStatus(ct); - //MetaReader.fullScanMetaAndPrint(ct); - LOG.info("Meta Print completed.testUpdatesOnMetaWithLegacyHRI"); - - Set htds = - MetaMigrationRemovingHTD.updateMetaWithNewRegionInfo( - TEST_UTIL.getHBaseCluster().getMaster()); - assertEquals(1, htds.size()); - assertTrue(htds.contains(htd)); - // Assert that the flag in ROOT is updated to reflect the correct status - boolean metaUpdated = MetaMigrationRemovingHTD. - isMetaHRIUpdated(TEST_UTIL.getMiniHBaseCluster().getMaster()); - assertEquals(true, metaUpdated); - LOG.info("END testMetaWithLegacyHRI"); - } - - private void undoVersionInMeta() throws IOException { - Delete d = new Delete(HRegionInfo.ROOT_REGIONINFO.getRegionName()); - // Erase the current version of root meta for this test. - d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER); - HTable rootTable = - new HTable(TEST_UTIL.getConfiguration(), HConstants.ROOT_TABLE_NAME); - try { - rootTable.delete(d); - } finally { - rootTable.close(); - } - } - - public static void assertEquals(int expected, int actual) { - if (expected != actual) { - throw new AssertionFailedError("expected:<" + - expected + "> but was:<" + - actual + ">"); - } - } - - public static void assertEquals(boolean expected, boolean actual) { - if (expected != actual) { - throw new AssertionFailedError("expected:<" + - expected + "> but was:<" + - actual + ">"); - } - } - - - /** - * @param c - * @param htd - * @param family - * @param numRegions - * @return - * @throws IOException - * @deprecated Just for testing migration of meta from 0.90 to 0.92... 
will be - * removed thereafter - */ - public int createMultiRegionsWithLegacyHRI(final Configuration c, - final HTableDescriptor htd, final byte [] family, int numRegions) - throws IOException { - if (numRegions < 3) throw new IOException("Must create at least 3 regions"); - byte [] startKey = Bytes.toBytes("aaaaa"); - byte [] endKey = Bytes.toBytes("zzzzz"); - byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3); - byte [][] regionStartKeys = new byte[splitKeys.length+1][]; - for (int i=0;i newRegions - = new ArrayList(startKeys.length); - int count = 0; - for (int i = 0; i < startKeys.length; i++) { - int j = (i + 1) % startKeys.length; - HRegionInfo090x hri = new HRegionInfo090x(htd, - startKeys[i], startKeys[j]); - Put put = new Put(hri.getRegionName()); - put.setWriteToWAL(false); - put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, - Writables.getBytes(hri)); - meta.put(put); - LOG.info("createMultiRegions: PUT inserted " + hri.toString()); - - newRegions.add(hri); - count++; - } - meta.close(); - return count; - } - - int createMultiRegionsWithNewHRI(final Configuration c, - final HTableDescriptor htd, final byte [] family, int numRegions) - throws IOException { - if (numRegions < 3) throw new IOException("Must create at least 3 regions"); - byte [] startKey = Bytes.toBytes("aaaaa"); - byte [] endKey = Bytes.toBytes("zzzzz"); - byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3); - byte [][] regionStartKeys = new byte[splitKeys.length+1][]; - for (int i=0;i newRegions - = new ArrayList(startKeys.length); - int count = 0; - for (int i = 0; i < startKeys.length; i++) { - int j = (i + 1) % startKeys.length; - HRegionInfo hri = new HRegionInfo(htd.getName(), - startKeys[i], startKeys[j]); - Put put = new Put(hri.getRegionName()); - put.setWriteToWAL(false); - put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, - Writables.getBytes(hri)); - meta.put(put); - LOG.info("createMultiRegions: PUT inserted " + hri.toString()); - - newRegions.add(hri); - count++; - } - meta.close(); - return count; - } - - @org.junit.Rule - public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu = - new org.apache.hadoop.hbase.ResourceCheckerJUnitRule(); -} - diff --git src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java index dacb936..a2f5dfc 100644 --- src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java +++ src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java @@ -73,8 +73,6 @@ public class SimpleRegionObserver extends BaseRegionObserver { boolean hadPostPut = false; boolean hadPreDeleted = false; boolean hadPostDeleted = false; - boolean hadPreGetClosestRowBefore = false; - boolean hadPostGetClosestRowBefore = false; boolean hadPreIncrement = false; boolean hadPostIncrement = false; boolean hadPreWALRestored = false; @@ -345,32 +343,6 @@ public class SimpleRegionObserver extends BaseRegionObserver { } @Override - public void preGetClosestRowBefore(final ObserverContext c, - final byte[] row, final byte[] family, final Result result) - throws IOException { - RegionCoprocessorEnvironment e = c.getEnvironment(); - assertNotNull(e); - assertNotNull(e.getRegion()); - assertNotNull(row); - assertNotNull(result); - if (beforeDelete) { - hadPreGetClosestRowBefore = true; - } - } - - @Override - public void postGetClosestRowBefore(final ObserverContext c, - final byte[] row, final byte[] family, final Result 
result) - throws IOException { - RegionCoprocessorEnvironment e = c.getEnvironment(); - assertNotNull(e); - assertNotNull(e.getRegion()); - assertNotNull(row); - assertNotNull(result); - hadPostGetClosestRowBefore = true; - } - - @Override public Result preIncrement(final ObserverContext c, final Increment increment) throws IOException { hadPreIncrement = true; diff --git src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index d2b3060..8925d58 100644 --- src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -237,13 +237,6 @@ class MockRegionServer implements HRegionInterface, RegionServerServices { } @Override - public Result getClosestRowBefore(byte[] regionName, byte[] row, - byte[] family) throws IOException { - // TODO Auto-generated method stub - return null; - } - - @Override public Result get(byte[] regionName, Get get) throws IOException { Map m = this.gets.get(regionName); if (m == null) return null; diff --git src/test/java/org/apache/hadoop/hbase/migration/TestMigration.java src/test/java/org/apache/hadoop/hbase/migration/TestMigration.java new file mode 100644 index 0000000..e637c31 --- /dev/null +++ src/test/java/org/apache/hadoop/hbase/migration/TestMigration.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.migration; + + +import java.io.IOException; + +import junit.framework.Assert; + +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.catalog.MetaMigrationRemovingHTD; +import org.apache.hadoop.hbase.util.Writables; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(LargeTests.class) +public class TestMigration { + @Test + public void testMigrateHRegionInfoFromVersion0toVersion2() + throws IOException { + HTableDescriptor htd = + getHTableDescriptor("testMigrateHRegionInfoFromVersion0toVersion2"); + HRegionInfo090x ninety = + new HRegionInfo090x(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); + byte [] bytes = Writables.getBytes(ninety); + // Now deserialize into an HRegionInfo + HRegionInfo hri = Writables.getHRegionInfo(bytes); + Assert.assertEquals(hri.getTableNameAsString(), + ninety.getTableDesc().getNameAsString()); + Assert.assertEquals(HRegionInfo.VERSION, hri.getVersion()); + } + + @Test + public void testMigrateHRegionInfoFromVersion1toVersion2() + throws IOException { + String tableName = "testMigrateHRegionInfoFromVersion1toVersion2"; + byte[] tableNameB = Bytes.toBytes(tableName); + + HRegionInfo090x2 ninety = + new HRegionInfo090x2(tableNameB, + HConstants.EMPTY_START_ROW, + HConstants.EMPTY_END_ROW); + byte [] bytes = Writables.getBytes(ninety); + // Now deserialize into an HRegionInfo + HRegionInfo hri = Writables.getHRegionInfo(bytes); + Assert.assertEquals(hri.getTableNameAsString(), + Bytes.toString(ninety.getTableName())); + Assert.assertEquals(HRegionInfo.VERSION, hri.getVersion()); + } + + + private HTableDescriptor getHTableDescriptor(final String name) { + HTableDescriptor htd = new HTableDescriptor(name); + htd.addFamily(new HColumnDescriptor("family")); + return htd; + } + + @org.junit.Rule + public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu = + new org.apache.hadoop.hbase.ResourceCheckerJUnitRule(); +} + diff --git src/test/java/org/apache/hadoop/hbase/migration/TestMigrationFrom090To092.java src/test/java/org/apache/hadoop/hbase/migration/TestMigrationFrom090To092.java deleted file mode 100644 index c3651ac..0000000 --- src/test/java/org/apache/hadoop/hbase/migration/TestMigrationFrom090To092.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.migration; - - -import java.io.IOException; - -import junit.framework.Assert; - -import org.apache.hadoop.hbase.*; -import org.apache.hadoop.hbase.catalog.MetaMigrationRemovingHTD; -import org.apache.hadoop.hbase.util.Writables; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -/** - * Migration tests that do not need spin up of a cluster. - * @deprecated Remove after we release 0.92 - */ -@Category(SmallTests.class) -public class TestMigrationFrom090To092 { - @Test - public void testMigrateHRegionInfoFromVersion0toVersion1() - throws IOException { - HTableDescriptor htd = - getHTableDescriptor("testMigrateHRegionInfoFromVersion0toVersion1"); - HRegionInfo090x ninety = - new HRegionInfo090x(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); - byte [] bytes = Writables.getBytes(ninety); - // Now deserialize into an HRegionInfo - HRegionInfo hri = Writables.getHRegionInfo(bytes); - Assert.assertEquals(hri.getTableNameAsString(), - ninety.getTableDesc().getNameAsString()); - Assert.assertEquals(HRegionInfo.VERSION, hri.getVersion()); - } - - private HTableDescriptor getHTableDescriptor(final String name) { - HTableDescriptor htd = new HTableDescriptor(name); - htd.addFamily(new HColumnDescriptor("family")); - return htd; - } - - @org.junit.Rule - public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu = - new org.apache.hadoop.hbase.ResourceCheckerJUnitRule(); -} - diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java deleted file mode 100644 index 5f97167..0000000 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ /dev/null @@ -1,348 +0,0 @@ -/** - * Copyright 2009 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.regionserver; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Writables; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.junit.experimental.categories.Category; - -/** - * {@link TestGet} is a medley of tests of get all done up as a single test. 
- * This class - */ -@Category(SmallTests.class) -public class TestGetClosestAtOrBefore extends HBaseTestCase { - private static final Log LOG = LogFactory.getLog(TestGetClosestAtOrBefore.class); - - private static final byte[] T00 = Bytes.toBytes("000"); - private static final byte[] T10 = Bytes.toBytes("010"); - private static final byte[] T11 = Bytes.toBytes("011"); - private static final byte[] T12 = Bytes.toBytes("012"); - private static final byte[] T20 = Bytes.toBytes("020"); - private static final byte[] T30 = Bytes.toBytes("030"); - private static final byte[] T31 = Bytes.toBytes("031"); - private static final byte[] T35 = Bytes.toBytes("035"); - private static final byte[] T40 = Bytes.toBytes("040"); - - - - public void testUsingMetaAndBinary() throws IOException { - FileSystem filesystem = FileSystem.get(conf); - Path rootdir = testDir; - // Up flush size else we bind up when we use default catalog flush of 16k. - HTableDescriptor.META_TABLEDESC.setMemStoreFlushSize(64 * 1024 * 1024); - - HRegion mr = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, - rootdir, this.conf, HTableDescriptor.META_TABLEDESC); - try { - // Write rows for three tables 'A', 'B', and 'C'. - for (char c = 'A'; c < 'D'; c++) { - HTableDescriptor htd = new HTableDescriptor("" + c); - final int last = 128; - final int interval = 2; - for (int i = 0; i <= last; i += interval) { - HRegionInfo hri = new HRegionInfo(htd.getName(), - i == 0? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i), - i == last? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i + interval)); - Put put = new Put(hri.getRegionName()); - put.setWriteToWAL(false); - put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, - Writables.getBytes(hri)); - mr.put(put, false); - } - } - InternalScanner s = mr.getScanner(new Scan()); - try { - List keys = new ArrayList(); - while(s.next(keys)) { - LOG.info(keys); - keys.clear(); - } - } finally { - s.close(); - } - findRow(mr, 'C', 44, 44); - findRow(mr, 'C', 45, 44); - findRow(mr, 'C', 46, 46); - findRow(mr, 'C', 43, 42); - mr.flushcache(); - findRow(mr, 'C', 44, 44); - findRow(mr, 'C', 45, 44); - findRow(mr, 'C', 46, 46); - findRow(mr, 'C', 43, 42); - // Now delete 'C' and make sure I don't get entries from 'B'. - byte [] firstRowInC = HRegionInfo.createRegionName(Bytes.toBytes("" + 'C'), - HConstants.EMPTY_BYTE_ARRAY, HConstants.ZEROES, false); - Scan scan = new Scan(firstRowInC); - s = mr.getScanner(scan); - try { - List keys = new ArrayList(); - while (s.next(keys)) { - mr.delete(new Delete(keys.get(0).getRow()), null, false); - keys.clear(); - } - } finally { - s.close(); - } - // Assert we get null back (pass -1). - findRow(mr, 'C', 44, -1); - findRow(mr, 'C', 45, -1); - findRow(mr, 'C', 46, -1); - findRow(mr, 'C', 43, -1); - mr.flushcache(); - findRow(mr, 'C', 44, -1); - findRow(mr, 'C', 45, -1); - findRow(mr, 'C', 46, -1); - findRow(mr, 'C', 43, -1); - } finally { - if (mr != null) { - try { - mr.close(); - } catch (Exception e) { - e.printStackTrace(); - } - mr.getLog().closeAndDelete(); - } - } - } - - /* - * @param mr - * @param table - * @param rowToFind - * @param answer Pass -1 if we're not to find anything. - * @return Row found. - * @throws IOException - */ - private byte [] findRow(final HRegion mr, final char table, - final int rowToFind, final int answer) - throws IOException { - byte [] tableb = Bytes.toBytes("" + table); - // Find the row. 
- byte [] tofindBytes = Bytes.toBytes((short)rowToFind); - byte [] metaKey = HRegionInfo.createRegionName(tableb, tofindBytes, - HConstants.NINES, false); - LOG.info("find=" + new String(metaKey)); - Result r = mr.getClosestRowBefore(metaKey); - if (answer == -1) { - assertNull(r); - return null; - } - assertTrue(Bytes.compareTo(Bytes.toBytes((short)answer), - extractRowFromMetaRow(r.getRow())) == 0); - return r.getRow(); - } - - private byte [] extractRowFromMetaRow(final byte [] b) { - int firstDelimiter = KeyValue.getDelimiter(b, 0, b.length, - HRegionInfo.DELIMITER); - int lastDelimiter = KeyValue.getDelimiterInReverse(b, 0, b.length, - HRegionInfo.DELIMITER); - int length = lastDelimiter - firstDelimiter - 1; - byte [] row = new byte[length]; - System.arraycopy(b, firstDelimiter + 1, row, 0, length); - return row; - } - - /** - * Test file of multiple deletes and with deletes as final key. - * @see HBASE-751 - */ - public void testGetClosestRowBefore3() throws IOException{ - HRegion region = null; - byte [] c0 = COLUMNS[0]; - byte [] c1 = COLUMNS[1]; - try { - HTableDescriptor htd = createTableDescriptor(getName()); - region = createNewHRegion(htd, null, null); - - Put p = new Put(T00); - p.add(c0, c0, T00); - region.put(p); - - p = new Put(T10); - p.add(c0, c0, T10); - region.put(p); - - p = new Put(T20); - p.add(c0, c0, T20); - region.put(p); - - Result r = region.getClosestRowBefore(T20, c0); - assertTrue(Bytes.equals(T20, r.getRow())); - - Delete d = new Delete(T20); - d.deleteColumn(c0, c0); - region.delete(d, null, false); - - r = region.getClosestRowBefore(T20, c0); - assertTrue(Bytes.equals(T10, r.getRow())); - - p = new Put(T30); - p.add(c0, c0, T30); - region.put(p); - - r = region.getClosestRowBefore(T30, c0); - assertTrue(Bytes.equals(T30, r.getRow())); - - d = new Delete(T30); - d.deleteColumn(c0, c0); - region.delete(d, null, false); - - r = region.getClosestRowBefore(T30, c0); - assertTrue(Bytes.equals(T10, r.getRow())); - r = region.getClosestRowBefore(T31, c0); - assertTrue(Bytes.equals(T10, r.getRow())); - - region.flushcache(); - - // try finding "010" after flush - r = region.getClosestRowBefore(T30, c0); - assertTrue(Bytes.equals(T10, r.getRow())); - r = region.getClosestRowBefore(T31, c0); - assertTrue(Bytes.equals(T10, r.getRow())); - - // Put into a different column family. Should make it so I still get t10 - p = new Put(T20); - p.add(c1, c1, T20); - region.put(p); - - r = region.getClosestRowBefore(T30, c0); - assertTrue(Bytes.equals(T10, r.getRow())); - r = region.getClosestRowBefore(T31, c0); - assertTrue(Bytes.equals(T10, r.getRow())); - - region.flushcache(); - - r = region.getClosestRowBefore(T30, c0); - assertTrue(Bytes.equals(T10, r.getRow())); - r = region.getClosestRowBefore(T31, c0); - assertTrue(Bytes.equals(T10, r.getRow())); - - // Now try combo of memcache and mapfiles. Delete the t20 COLUMS[1] - // in memory; make sure we get back t10 again. - d = new Delete(T20); - d.deleteColumn(c1, c1); - region.delete(d, null, false); - r = region.getClosestRowBefore(T30, c0); - assertTrue(Bytes.equals(T10, r.getRow())); - - // Ask for a value off the end of the file. Should return t10. - r = region.getClosestRowBefore(T31, c0); - assertTrue(Bytes.equals(T10, r.getRow())); - region.flushcache(); - r = region.getClosestRowBefore(T31, c0); - assertTrue(Bytes.equals(T10, r.getRow())); - - // Ok. Let the candidate come out of hfile but have delete of - // the candidate be in memory. 
- p = new Put(T11); - p.add(c0, c0, T11); - region.put(p); - d = new Delete(T10); - d.deleteColumn(c1, c1); - r = region.getClosestRowBefore(T12, c0); - assertTrue(Bytes.equals(T11, r.getRow())); - } finally { - if (region != null) { - try { - region.close(); - } catch (Exception e) { - e.printStackTrace(); - } - region.getLog().closeAndDelete(); - } - } - } - - /** For HBASE-694 */ - public void testGetClosestRowBefore2() throws IOException{ - HRegion region = null; - byte [] c0 = COLUMNS[0]; - try { - HTableDescriptor htd = createTableDescriptor(getName()); - region = createNewHRegion(htd, null, null); - - Put p = new Put(T10); - p.add(c0, c0, T10); - region.put(p); - - p = new Put(T30); - p.add(c0, c0, T30); - region.put(p); - - p = new Put(T40); - p.add(c0, c0, T40); - region.put(p); - - // try finding "035" - Result r = region.getClosestRowBefore(T35, c0); - assertTrue(Bytes.equals(T30, r.getRow())); - - region.flushcache(); - - // try finding "035" - r = region.getClosestRowBefore(T35, c0); - assertTrue(Bytes.equals(T30, r.getRow())); - - p = new Put(T20); - p.add(c0, c0, T20); - region.put(p); - - // try finding "035" - r = region.getClosestRowBefore(T35, c0); - assertTrue(Bytes.equals(T30, r.getRow())); - - region.flushcache(); - - // try finding "035" - r = region.getClosestRowBefore(T35, c0); - assertTrue(Bytes.equals(T30, r.getRow())); - } finally { - if (region != null) { - try { - region.close(); - } catch (Exception e) { - e.printStackTrace(); - } - region.getLog().closeAndDelete(); - } - } - } - - @org.junit.Rule - public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu = - new org.apache.hadoop.hbase.ResourceCheckerJUnitRule(); -} - diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java index 6dfba41..575962f 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java @@ -30,8 +30,6 @@ import java.io.IOException; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSTableDescriptors; -import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.MD5Hash; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -43,22 +41,24 @@ public class TestHRegionInfo { final byte [] tn = Bytes.toBytes(tableName); String startKey = "startkey"; final byte [] sk = Bytes.toBytes(startKey); - String id = "id"; + String endKey = "endkey"; + final byte [] ek = Bytes.toBytes(endKey); + + String id = "id"; // old format region name - byte [] name = HRegionInfo.createRegionName(tn, sk, id, false); + byte [] name = HRegionInfo.createRegionName(tn, sk, ek, id, false); String nameStr = Bytes.toString(name); - assertEquals(tableName + "," + startKey + "," + id, nameStr); + assertEquals(tableName + "!," + endKey + "," + id, nameStr); - // new format region name. - String md5HashInHex = MD5Hash.getMD5AsHex(name); + // This should not change as it changes the location on HDFS. + String md5HashInHex = "e3365833b0bc503b5c0f4e8441da5897"; assertEquals(HRegionInfo.MD5_HEX_LENGTH, md5HashInHex.length()); - name = HRegionInfo.createRegionName(tn, sk, id, true); + name = HRegionInfo.createRegionName(tn, sk, ek, id, true); nameStr = Bytes.toString(name); - assertEquals(tableName + "," + startKey + "," - + id + "." + md5HashInHex + ".", - nameStr); + assertEquals(tableName + "!," + endKey + "," + id + "." 
+        md5HashInHex +
+        ".", nameStr);
   }

   @Test
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
index 33c78ab..fa1f37d 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
@@ -47,48 +47,6 @@ public class TestMinVersions extends HBaseTestCase {
   private final byte[] c0 = COLUMNS[0];

   /**
-   * Verify behavior of getClosestBefore(...)
-   */
-  public void testGetClosestBefore() throws Exception {
-    HTableDescriptor htd = createTableDescriptor(getName(), 1, 1000, 1, false);
-    HRegion region = createNewHRegion(htd, null, null);
-
-    // 2s in the past
-    long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;
-
-    Put p = new Put(T1, ts);
-    p.add(c0, c0, T1);
-    region.put(p);
-
-    p = new Put(T1, ts+1);
-    p.add(c0, c0, T4);
-    region.put(p);
-
-    p = new Put(T3, ts);
-    p.add(c0, c0, T3);
-    region.put(p);
-
-    // now make sure that getClosestBefore(...) get can
-    // rows that would be expired without minVersion.
-    // also make sure it gets the latest version
-    Result r = region.getClosestRowBefore(T1, c0);
-    checkResult(r, c0, T4);
-
-    r = region.getClosestRowBefore(T2, c0);
-    checkResult(r, c0, T4);
-
-    // now flush/compact
-    region.flushcache();
-    region.compactStores(true);
-
-    r = region.getClosestRowBefore(T1, c0);
-    checkResult(r, c0, T4);
-
-    r = region.getClosestRowBefore(T2, c0);
-    checkResult(r, c0, T4);
-  }
-
-  /**
    * Test mixed memstore and storefile scanning
    * with minimum versions.
    */
diff --git src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
index cffdcb6..108c9a1 100644
--- src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
+++ src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
@@ -27,6 +27,8 @@ import javax.xml.bind.JAXBContext;
 import javax.xml.bind.JAXBException;

 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.rest.client.Client;
 import org.apache.hadoop.hbase.rest.client.Cluster;
@@ -42,8 +44,19 @@ import org.junit.experimental.categories.Category;

 @Category(MediumTests.class)
 public class TestStatusResource {
-  private static final byte[] ROOT_REGION_NAME = Bytes.toBytes("-ROOT-,,0");
-  private static final byte[] META_REGION_NAME = Bytes.toBytes(".META.,,1");
+
+  private static final byte[] ROOT_REGION_NAME = HRegionInfo.createRegionName(HConstants.ROOT_TABLE_NAME,
+      HConstants.EMPTY_BYTE_ARRAY,
+      HConstants.EMPTY_BYTE_ARRAY,
+      "0".getBytes(),
+      false);
+
+  private static final byte[] META_REGION_NAME = HRegionInfo.createRegionName(HConstants.META_TABLE_NAME,
+      HConstants.EMPTY_BYTE_ARRAY,
+      HConstants.EMPTY_BYTE_ARRAY,
+      "1".getBytes(),
+      false);
+

   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private static final HBaseRESTTestingUtility REST_TEST_UTIL =
diff --git src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
index c44f720..32e91d9 100644
--- src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
+++ src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
@@ -45,25 +45,19 @@ public class TestStorageClusterStatusModel extends TestCase {
     " name=\"test1\" maxHeapSizeMB=\"1024\" heapSizeMB=\"128\">" +
     "" +
+      " memstoreSizeMB=\"0\"/>" +
     "" +
     "" +
+      " memstoreSizeMB=\"0\"/>" +
     "";
-  private static final String AS_PB =
-    "CjsKBXRlc3QxEOO6i+eeJBgAIIABKIAIMiMKCS1ST09ULSwsMBABGAEgACgAMAA4AUACSAFQAVgB" +
-    "YAFoAQpHCgV0ZXN0MhD+krHwniQYACCABCiACDIvChUuTUVUQS4sLDEyNDYwMDAwNDM3MjQQARgB" +
-    "IAAoADAAOAFAAkgBUAFYAWABaAEYAiAAKQAAAAAAAPA/";
-
+  private static final String AS_PB =
+    "Ci0KBXRlc3QxEOO6i+eeJBgAIIABKIAIMhUKCS1ST09ULSwsMBABGAEgACgAMAAKOQoFdGVzdDIQ" +
+    "/pKx8J4kGAAggAQogAgyIQoVLk1FVEEuLCwxMjQ2MDAwMDQzNzI0EAEYASAAKAAwABgCIAApAAAA" +
+    "AAAA8D8=";
+
   private JAXBContext context;

   public TestStorageClusterStatusModel() throws JAXBException {
@@ -77,10 +71,9 @@ public class TestStorageClusterStatusModel extends TestCase {
     model.setRequests(0);
     model.setAverageLoad(1.0);
     model.addLiveNode("test1", 1245219839331L, 128, 1024)
-      .addRegion(Bytes.toBytes("-ROOT-,,0"), 1, 1, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1);
+      .addRegion(Bytes.toBytes("-ROOT-,,0"), 1, 1, 0, 0, 0);
     model.addLiveNode("test2", 1245239331198L, 512, 1024)
-      .addRegion(Bytes.toBytes(".META.,,1246000043724"),1, 1, 0, 0, 0,
-          1, 2, 1, 1, 1, 1, 1);
+      .addRegion(Bytes.toBytes(".META.,,1246000043724"),1, 1, 0, 0, 0);

     return model;
   }
@@ -126,13 +119,6 @@ public class TestStorageClusterStatusModel extends TestCase {
     assertEquals(region.getStorefileSizeMB(), 0);
     assertEquals(region.getMemstoreSizeMB(), 0);
     assertEquals(region.getStorefileIndexSizeMB(), 0);
-    assertEquals(region.getReadRequestsCount(), 1);
-    assertEquals(region.getWriteRequestsCount(), 2);
-    assertEquals(region.getRootIndexSizeKB(), 1);
-    assertEquals(region.getTotalStaticIndexSizeKB(), 1);
-    assertEquals(region.getTotalStaticBloomSizeKB(), 1);
-    assertEquals(region.getTotalCompactingKVs(), 1);
-    assertEquals(region.getCurrentCompactedKVs(), 1);
     assertFalse(regions.hasNext());
     node = nodes.next();
     assertEquals(node.getName(), "test2");
@@ -147,14 +133,6 @@ public class TestStorageClusterStatusModel extends TestCase {
     assertEquals(region.getStorefileSizeMB(), 0);
     assertEquals(region.getMemstoreSizeMB(), 0);
     assertEquals(region.getStorefileIndexSizeMB(), 0);
-    assertEquals(region.getReadRequestsCount(), 1);
-    assertEquals(region.getWriteRequestsCount(), 2);
-    assertEquals(region.getRootIndexSizeKB(), 1);
-    assertEquals(region.getTotalStaticIndexSizeKB(), 1);
-    assertEquals(region.getTotalStaticBloomSizeKB(), 1);
-    assertEquals(region.getTotalCompactingKVs(), 1);
-    assertEquals(region.getCurrentCompactedKVs(), 1);
-
     assertFalse(regions.hasNext());
     assertFalse(nodes.hasNext());
   }
diff --git src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
index b6f0ab5..754c04b 100644
--- src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
+++ src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
@@ -45,7 +45,8 @@ public class TestTableRegionModel extends TestCase {
     " endKey=\"enp5eng=\"" +
     " startKey=\"YWJyYWNhZGJyYQ==\"" +
     " id=\"8731042424\"" +
-    " name=\"testtable,abracadbra,8731042424\"/>";
+    " table=\"testtable\"" +
+    " name=\"testtable,zzyzx,8731042424\"/>";

   private JAXBContext context;
@@ -55,9 +56,7 @@ public class TestTableRegionModel extends TestCase {
   }

   private TableRegionModel buildTestModel() {
-    TableRegionModel model =
-      new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION);
-    return model;
+    return new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION);
   }

   @SuppressWarnings("unused")
@@ -77,9 +76,12 @@ public class TestTableRegionModel extends TestCase {
     assertTrue(Bytes.equals(model.getEndKey(), END_KEY));
     assertEquals(model.getId(), ID);
     assertEquals(model.getLocation(), LOCATION);
-    assertEquals(model.getName(),
-      TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID) +
-      ".ad9860f031282c46ed431d7af8f94aca.");
+    byte[] regionInfo = HRegionInfo.createRegionName(TABLE.getBytes(),
+        START_KEY,
+        END_KEY,
+        Long.toString(ID).getBytes(),
+        true);
+    assertEquals(model.getName(), Bytes.toStringBinary(regionInfo));
   }

   public void testBuildModel() throws Exception {
@@ -90,17 +92,13 @@ public class TestTableRegionModel extends TestCase {
     TableRegionModel model = buildTestModel();
     String modelName = model.getName();
     HRegionInfo hri = new HRegionInfo(Bytes.toBytes(TABLE),
-      START_KEY, END_KEY, false, ID);
+        START_KEY,
+        END_KEY,
+        false,
+        ID);
     assertEquals(modelName, hri.getRegionNameAsString());
   }

-  public void testSetName() {
-    TableRegionModel model = buildTestModel();
-    String name = model.getName();
-    model.setName(name);
-    assertEquals(name, model.getName());
-  }
-
   public void testFromXML() throws Exception {
     checkModel(fromXML(AS_XML));
   }
diff --git src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
index 444d6d5..a087faa 100644
--- src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
+++ src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
@@ -19,10 +19,6 @@
  */
 package org.apache.hadoop.hbase.thrift;

-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -32,10 +28,7 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.filter.ParseFilter;
 import org.apache.hadoop.hbase.thrift.generated.BatchMutation;
 import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
@@ -55,6 +48,8 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

+import static org.junit.Assert.*;
+
 /**
  * Unit testing for ThriftServerRunner.HBaseHandler, a part of the
  * org.apache.hadoop.hbase.thrift package.
@@ -91,28 +86,7 @@ public class TestThriftServer {
     UTIL.shutdownMiniCluster();
   }

-  /**
-   * Runs all of the tests under a single JUnit test method. We
-   * consolidate all testing to one method because HBaseClusterTestCase
-   * is prone to OutOfMemoryExceptions when there are three or more
-   * JUnit test methods.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testAll() throws Exception {
-    // Run all tests
-    doTestTableCreateDrop();
-    doTestThriftMetrics();
-    doTestTableMutations();
-    doTestTableTimestampsAndColumns();
-    doTestTableScanners();
-    doTestGetTableRegions();
-    doTestFilterRegistration();
-    doTestGetRegionInfo();
-  }
-
-  /**
+  /**
    * Tests for creating, enabling, disabling, and deleting tables. Also
    * tests that creating a table with an invalid column name yields an
    * IllegalArgument exception.
@@ -159,7 +133,7 @@ public class TestThriftServer {
   private static void setupMetricsContext() throws IOException {
     ContextFactory factory = ContextFactory.getFactory();
     factory.setAttribute(ThriftMetrics.CONTEXT_NAME + ".class",
-        NoEmitMetricsContext.class.getName());
+                         NoEmitMetricsContext.class.getName());
     MetricsUtil.getContext(ThriftMetrics.CONTEXT_NAME)
       .createRecord(ThriftMetrics.CONTEXT_NAME).remove();
   }
@@ -208,6 +182,7 @@ public class TestThriftServer {
    *
    * @throws Exception
    */
+  @Test
   public void doTestTableMutations() throws Exception {
     // Setup
     ThriftServerRunner.HBaseHandler handler =
@@ -221,11 +196,11 @@ public class TestThriftServer {

     // Assert that the changes were made
     assertEquals(valueAname,
-      handler.get(tableAname, rowAname, columnAname, null).get(0).value);
+        handler.get(tableAname, rowAname, columnAname, null).get(0).value);
     TRowResult rowResult1 = handler.getRow(tableAname, rowAname, null).get(0);
     assertEquals(rowAname, rowResult1.row);
     assertEquals(valueBname,
-      rowResult1.columns.get(columnBname).value);
+        rowResult1.columns.get(columnBname).value);

     // Apply a few BatchMutations for rowA and rowB
     // rowAmutations.add(new Mutation(true, columnAname, null));
@@ -281,6 +256,7 @@ public class TestThriftServer {
    *
    * @throws Exception
    */
+  @Test
   public void doTestTableTimestampsAndColumns() throws Exception {
     // Setup
     ThriftServerRunner.HBaseHandler handler =
@@ -360,6 +336,7 @@ public class TestThriftServer {
    *
    * @throws Exception
    */
+  @Test
   public void doTestTableScanners() throws Exception {
     // Setup
     ThriftServerRunner.HBaseHandler handler =
@@ -429,6 +406,7 @@ public class TestThriftServer {
    *
    * @throws Exception
    */
+  @Test
   public void doTestGetTableRegions() throws Exception {
     ThriftServerRunner.HBaseHandler handler =
       new ThriftServerRunner.HBaseHandler(UTIL.getConfiguration());
@@ -464,19 +442,15 @@ public class TestThriftServer {
     assertEquals("filterclass", registeredFilters.get("MyFilter"));
   }

-  public void doTestGetRegionInfo() throws Exception {
-    ThriftServerRunner.HBaseHandler handler =
-      new ThriftServerRunner.HBaseHandler(UTIL.getConfiguration());
-    doTestGetRegionInfo(handler);
-  }
-
   public static void doTestGetRegionInfo(Hbase.Iface handler) throws Exception {
     // Create tableA and add two columns to rowA
     handler.createTable(tableAname, getColumnDescriptors());
     try {
       handler.mutateRow(tableAname, rowAname, getMutations(), null);
-      byte[] searchRow = HRegionInfo.createRegionName(
-          tableAname.array(), rowAname.array(), HConstants.NINES, false);
+      byte[] searchRow = HRegionInfo.createRegionName(tableAname.array(),
+          rowAname.array(),
+          Long.parseLong(HConstants.NINES),
+          false);
       TRegionInfo regionInfo = handler.getRegionInfo(ByteBuffer.wrap(searchRow));
       assertTrue(Bytes.toStringBinary(regionInfo.getName()).startsWith(
         Bytes.toStringBinary(tableAname)));
@@ -575,6 +549,58 @@ public class TestThriftServer {
     handler.scannerClose(scannerId);
   }

+
+  /**
+   * Tests getRegionInfo(). For the start row of each test table, and for a
+   * row past its last key, looks up the containing region and verifies that
+   * the returned region name parses back to the owning table.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void doTestGetRegionInfo() throws Exception {
+    ThriftServerRunner.HBaseHandler handler =
+        new ThriftServerRunner.HBaseHandler(UTIL.getConfiguration());
+
+    createTestTables(handler);
+
+    ByteBuffer searchRow;
+    byte[] startRow;
+    byte[] tableName;
+    TRegionInfo regionInfo;
+
+    startRow = HTableDescriptor.getStartRow(tableAname.array(),
+        "".getBytes());
+    searchRow = ByteBuffer.wrap(startRow);
+    regionInfo = handler.getRegionInfo(searchRow);
+    tableName = HRegionInfo.parseRegionName(regionInfo.getName())[0];
+    assertArrayEquals(tableAname.array(), tableName);
+
+    startRow = HTableDescriptor.getStartRow(tableAname.array(),
+        "zzzzz".getBytes());
+    searchRow = ByteBuffer.wrap(startRow);
+    regionInfo = handler.getRegionInfo(searchRow);
+    tableName = HRegionInfo.parseRegionName(regionInfo.getName())[0];
+    assertArrayEquals(tableAname.array(), tableName);
+
+
+    startRow = HTableDescriptor.getStartRow(tableBname.array(),
+        "".getBytes());
+    searchRow = ByteBuffer.wrap(startRow);
+    regionInfo = handler.getRegionInfo(searchRow);
+    tableName = HRegionInfo.parseRegionName(regionInfo.getName())[0];
+    assertArrayEquals(tableBname.array(), tableName);
+
+    startRow = HTableDescriptor.getStartRow(tableBname.array(),
+        "zzzzz".getBytes());
+    searchRow = ByteBuffer.wrap(startRow);
+    regionInfo = handler.getRegionInfo(searchRow);
+    tableName = HRegionInfo.parseRegionName(regionInfo.getName())[0];
+    assertArrayEquals(tableBname.array(), tableName);
+
+    dropTestTables(handler);
+  }
+
   @org.junit.Rule
   public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
     new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();