From a03aad8416ab28c02e3416833897eaf2c84e02e9 Mon Sep 17 00:00:00 2001 From: Alex Newman Date: Tue, 13 Dec 2011 11:32:00 -0800 Subject: [PATCH 1/3] Changed regioninfo format to use endKey instead of startkey --- .../java/org/apache/hadoop/hbase/HConstants.java | 5 +- .../java/org/apache/hadoop/hbase/HRegionInfo.java | 172 +++++++----- .../java/org/apache/hadoop/hbase/KeyValue.java | 29 ++- .../apache/hadoop/hbase/catalog/MetaReader.java | 31 +-- .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 6 +- .../hadoop/hbase/client/HConnectionManager.java | 130 ++++++--- .../apache/hadoop/hbase/client/MetaScanner.java | 48 +--- .../apache/hadoop/hbase/client/MetaSearchRow.java | 62 ++++ .../apache/hadoop/hbase/rest/RegionsResource.java | 17 +- .../hadoop/hbase/rest/model/TableRegionModel.java | 35 ++- .../apache/hadoop/hbase/HBaseTestingUtility.java | 26 +-- .../java/org/apache/hadoop/hbase/TestKeyValue.java | 301 +++++++++++++++----- .../org/apache/hadoop/hbase/client/TestAdmin.java | 13 +- .../regionserver/TestGetClosestAtOrBefore.java | 84 +------ .../hadoop/hbase/regionserver/TestHRegionInfo.java | 18 +- .../hadoop/hbase/regionserver/TestMemStore.java | 31 -- .../TestSplitTransactionOnCluster.java | 8 +- .../hbase/rest/model/TestTableRegionModel.java | 28 +- 18 files changed, 600 insertions(+), 444 deletions(-) create mode 100644 src/main/java/org/apache/hadoop/hbase/client/MetaSearchRow.java diff --git src/main/java/org/apache/hadoop/hbase/HConstants.java src/main/java/org/apache/hadoop/hbase/HConstants.java index 1cf58a9..6c78459 100644 --- src/main/java/org/apache/hadoop/hbase/HConstants.java +++ src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -236,7 +236,7 @@ public final class HConstants { // Always store the location of the root table's HRegion. // This HRegion is never split. - // region name = table + startkey + regionid. This is the row key. + // region name = table + endkey + regionid. This is the row key. // each row in the root and meta tables describes exactly 1 region // Do we ever need to know all the information that we are storing? @@ -259,9 +259,6 @@ public final class HConstants { /** The META table's name. */ public static final byte [] META_TABLE_NAME = Bytes.toBytes(".META."); - /** delimiter used between portions of a region name */ - public static final int META_ROW_DELIMITER = ','; - /** The catalog family as a string*/ public static final String CATALOG_FAMILY_STR = "info"; diff --git src/main/java/org/apache/hadoop/hbase/HRegionInfo.java src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index 74cb821..3710b5a 100644 --- src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -23,6 +23,8 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.EOFException; import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; import java.util.Arrays; import org.apache.commons.logging.Log; @@ -57,13 +59,13 @@ implements WritableComparable { * in the filesystem. * * New region name format: - * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. + * <tablename>,,<endkey>,<regionIdTimestamp>.<encodedName>. 
* where, * <encodedName> is a hex version of the MD5 hash of - * <tablename>,<startkey>,<regionIdTimestamp> + * <tablename>,<endkey>,<regionIdTimestamp> * * The old region name format: - * <tablename>,<startkey>,<regionIdTimestamp> + * <tablename>,<endkey>,<regionIdTimestamp> * For region names in the old format, the encoded name is a 32-bit * JenkinsHash integer value (in its decimal notation, string form). *

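Editor's note, not part of the patch: a minimal sketch of what the reworked name layout produces once the createRegionName()/addEncoding() hunks below are applied. The table name, end key, and region id are invented for illustration; HRegionInfo and Bytes are the classes being modified/used by this patch.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionNameLayoutSketch {
  public static void main(String[] args) {
    // Patched layout: <tablename> <end-of-table marker> ',' <endkey> ',' <regionId>
    // plus, for the new format, a trailing '.' <md5 hex of the preceding bytes> '.'
    byte[] name = HRegionInfo.createRegionName(
        Bytes.toBytes("mytable"),         // table name (example value)
        null,                             // startKey: kept in the signature, not used when building the key here
        Bytes.toBytes("endrow"),          // end key; null/empty marks the last region of the table
        Bytes.toBytes("1323801120000"),   // region id, usually the creation timestamp
        true);                            // newFormat: append ".<md5 hex>."
    System.out.println(Bytes.toStringBinary(name));
  }
}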
@@ -77,7 +79,7 @@ implements WritableComparable { /** Separator used to demarcate the encodedName in a region name * in the new format. See description on new format above. */ - private static final int ENC_SEPARATOR = '.'; + public static final int ENC_SEPARATOR = '.'; public static final int MD5_HEX_LENGTH = 32; /** @@ -91,7 +93,7 @@ implements WritableComparable { if ((regionName.length >= 1) && (regionName[regionName.length - 1] == ENC_SEPARATOR)) { // region name is new format. it contains the encoded name. - return true; + return true; } return false; } @@ -104,7 +106,7 @@ implements WritableComparable { String encodedName; if (hasEncodedName(regionName)) { // region is in new format: - // ,,/encodedName/ + // ,,/encodedName/ encodedName = Bytes.toString(regionName, regionName.length - MD5_HEX_LENGTH - 1, MD5_HEX_LENGTH); @@ -137,6 +139,13 @@ implements WritableComparable { /** delimiter used between portions of a region name */ public static final int DELIMITER = ','; + // Most tables end with 0x01 but the last region, which + // should have a null end row, is marked with 0x02 to make + // searching through meta simpler. + public static final int END_OF_TABLE_NAME = 1 + ; + public static final int END_OF_TABLE_NAME_FOR_EMPTY_ENDKEY = END_OF_TABLE_NAME + 1; + /** HRegionInfo for root region */ public static final HRegionInfo ROOT_REGIONINFO = new HRegionInfo(0L, Bytes.toBytes("-ROOT-")); @@ -272,8 +281,7 @@ implements WritableComparable { this.offLine = false; this.regionId = regionid; - this.regionName = createRegionName(this.tableName, startKey, regionId, true); - + this.regionName = createRegionName(this.tableName, startKey, endKey, Long.toString(regionId).getBytes(), true); this.regionNameStr = Bytes.toStringBinary(this.regionName); this.split = split; this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone(); @@ -306,81 +314,97 @@ implements WritableComparable { /** * Make a region name of passed parameters. * @param tableName - * @param startKey Can be null + * @param endKey Can be null * @param regionid Region id (Usually timestamp from when region was created). * @param newFormat should we create the region name in the new format * (such that it contains its encoded name?). - * @return Region name made of passed tableName, startKey and id + * @return Region name made of passed tableName, endKey and id */ public static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final long regionid, boolean newFormat) { - return createRegionName(tableName, startKey, Long.toString(regionid), newFormat); + final byte [] endKey, final long regionid, boolean newFormat) { + return createRegionName(tableName, null, endKey, Long.toString(regionid).getBytes(), newFormat); } - /** - * Make a region name of passed parameters. - * @param tableName - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). - * @return Region name made of passed tableName, startKey and id - */ - public static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final String id, boolean newFormat) { - return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat); - } /** * Make a region name of passed parameters. 
+ * * @param tableName - * @param startKey Can be null + * @param startKey + *@param endKey Can be null * @param id Region id (Usually timestamp from when region was created). * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). - * @return Region name made of passed tableName, startKey and id +* (such that it contains its encoded name?). @return Region name made of passed tableName, endKey and id */ - public static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final byte [] id, boolean newFormat) { - byte [] b = new byte [tableName.length + 2 + id.length + - (startKey == null? 0: startKey.length) + - (newFormat ? (MD5_HEX_LENGTH + 2) : 0)]; - - int offset = tableName.length; - System.arraycopy(tableName, 0, b, 0, offset); - b[offset++] = DELIMITER; - if (startKey != null && startKey.length > 0) { - System.arraycopy(startKey, 0, b, offset, startKey.length); - offset += startKey.length; + public static byte [] createRegionName(final byte[] tableName, + byte[] startKey, + final byte[] endKey, + final byte[] id, + boolean newFormat){ + + byte [] oneByte = new byte[1]; + int allocation = tableName == null ? 2 : tableName.length + 2; + allocation += endKey == null ? 1 : endKey.length + 1; + allocation += id == null ? 0 : id.length; + + ByteBuffer byteArrayDataOutput = MappedByteBuffer.allocate(allocation); + byteArrayDataOutput.put(tableName); + + if ( endKey == null || endKey.length <= 0 ) { + oneByte[0] = END_OF_TABLE_NAME_FOR_EMPTY_ENDKEY; + byteArrayDataOutput.put(oneByte); + + oneByte[0] = DELIMITER; + byteArrayDataOutput.put(oneByte); + } else { + oneByte[0] = END_OF_TABLE_NAME; + byteArrayDataOutput.put(oneByte); + + oneByte[0] = DELIMITER; + byteArrayDataOutput.put(oneByte); + + byteArrayDataOutput.put(endKey); } - b[offset++] = DELIMITER; - System.arraycopy(id, 0, b, offset, id.length); - offset += id.length; + byteArrayDataOutput.put(oneByte); + + if (id != null && id.length > 0 ) { + byteArrayDataOutput.put(id); + } + + if (newFormat){ + return addEncoding(byteArrayDataOutput.array(), startKey); + } else { + return byteArrayDataOutput.array(); + } + + } + + private static byte [] addEncoding(final byte[] metaKey, + final byte[] startKey) { + - if (newFormat) { // // Encoded name should be built into the region name. // - // Use the region name thus far (namely, ,,) + // Use the region name thus far (namely, ,,) // to compute a MD5 hash to be used as the encoded name, and append // it to the byte buffer. // - String md5Hash = MD5Hash.getMD5AsHex(b, 0, offset); - byte [] md5HashBytes = Bytes.toBytes(md5Hash); + + byte[] oneByte = new byte[1]; + byte[] md5HashBytes; + oneByte[0] = DELIMITER; + + md5HashBytes = MD5Hash.getMD5AsHex(metaKey).getBytes(); if (md5HashBytes.length != MD5_HEX_LENGTH) { LOG.error("MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH + - "; Got=" + md5HashBytes.length); + "; Got=" + md5HashBytes.length); } - // now append the bytes '..' to the end - b[offset++] = ENC_SEPARATOR; - System.arraycopy(md5HashBytes, 0, b, offset, MD5_HEX_LENGTH); - offset += MD5_HEX_LENGTH; - b[offset++] = ENC_SEPARATOR; - } - - return b; + oneByte[0] = ENC_SEPARATOR; + byte [] encoding = Bytes.add(oneByte, md5HashBytes, oneByte); + return Bytes.add(metaKey, encoding); } /** @@ -388,7 +412,7 @@ implements WritableComparable { * @param regionName * @return Table name. 
*/ - public static byte [] getTableName(byte [] regionName) { + public static byte [] getTableName(byte[] regionName) { int offset = -1; for (int i = 0; i < regionName.length; i++) { if (regionName[i] == DELIMITER) { @@ -396,22 +420,40 @@ implements WritableComparable { break; } } + + //Remove the end of table marker from the name + if (regionName[offset] == END_OF_TABLE_NAME_FOR_EMPTY_ENDKEY || + regionName[offset] == END_OF_TABLE_NAME) { + offset--; + } + byte [] tableName = new byte[offset]; System.arraycopy(regionName, 0, tableName, 0, offset); return tableName; } + private static boolean IsLHSSplit(final byte [] regionName, final int offset) throws IOException { + + byte last = regionName[offset]; + if (last == END_OF_TABLE_NAME || last == END_OF_TABLE_NAME_FOR_EMPTY_ENDKEY) { + return true; + } else if ( DELIMITER == last) { + throw new IOException("Old Meta format"); + } + return false; + } /** * Separate elements of a regionName. * @param regionName - * @return Array of byte[] containing tableName, startKey and id + * @return Array of byte[] containing tableName, endKey and id * @throws IOException */ public static byte [][] parseRegionName(final byte [] regionName) throws IOException { int offset = -1; + for (int i = 0; i < regionName.length; i++) { - if (regionName[i] == DELIMITER) { + if (IsLHSSplit(regionName, offset)) { offset = i; break; } @@ -427,10 +469,10 @@ implements WritableComparable { } } if(offset == -1) throw new IOException("Invalid regionName format"); - byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; + byte [] endKey = HConstants.EMPTY_BYTE_ARRAY; if(offset != tableName.length + 1) { - startKey = new byte[offset - tableName.length - 1]; - System.arraycopy(regionName, tableName.length + 1, startKey, 0, + endKey = new byte[offset - tableName.length - 1]; + System.arraycopy(regionName, tableName.length + 1, endKey, 0, offset - tableName.length - 1); } byte [] id = new byte[regionName.length - offset - 1]; @@ -438,7 +480,7 @@ implements WritableComparable { regionName.length - offset - 1); byte [][] elements = new byte[3][]; elements[0] = tableName; - elements[1] = startKey; + elements[1] = endKey; elements[2] = id; return elements; } @@ -497,12 +539,12 @@ implements WritableComparable { } /** - * Get current table name of the region + * Get the tablename of the region from the regionKey * @return byte array of table name */ public byte[] getTableName() { if (tableName == null || tableName.length == 0) { - tableName = getTableName(getRegionName()); + tableName = getRegionName(); } return tableName; } diff --git src/main/java/org/apache/hadoop/hbase/KeyValue.java src/main/java/org/apache/hadoop/hbase/KeyValue.java index be7e2d8..5c5dec0 100644 --- src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -26,6 +26,7 @@ import java.nio.ByteBuffer; import java.util.Comparator; import java.util.HashMap; import java.util.Map; +import java.util.UUID; import com.google.common.primitives.Longs; import org.apache.commons.logging.Log; @@ -64,6 +65,7 @@ import org.apache.hadoop.io.Writable; * The column does not contain the family/qualifier delimiter, {@link #COLUMN_FAMILY_DELIMITER} */ public class KeyValue implements Writable, HeapSize { + private static String UUIDMetaTableKey = UUID.nameUUIDFromBytes(Bytes.toBytes(".META.")).toString(); static final Log LOG = LogFactory.getLog(KeyValue.class); // TODO: Group Key-only comparators and operations into a Key class, just // for neatness sake, if can figure what to call it. 
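Editor's note, not part of the patch: a small sketch of why the END_OF_TABLE_NAME / END_OF_TABLE_NAME_FOR_EMPTY_ENDKEY markers introduced above simplify meta scans, which the comparator hunks below rely on. With end-key based names the last region of a table has an empty end key; tagging it with 0x02 instead of 0x01 makes it sort after every sibling region under a plain byte-wise comparison. The table and key values are invented for illustration and assume the patched createRegionName().

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class EndOfTableMarkerSketch {
  public static void main(String[] args) {
    byte[] table = Bytes.toBytes("t1");
    // Region with a real end key: t1 | 0x01 | ',' | "row-b" | ',' | id
    byte[] middle = HRegionInfo.createRegionName(
        table, null, Bytes.toBytes("row-b"), Bytes.toBytes("1"), false);
    // Last region of the table (empty end key): t1 | 0x02 | ',' | ',' | id
    byte[] last = HRegionInfo.createRegionName(
        table, null, HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("2"), false);
    // 0x01 < 0x02 in the byte right after the table name, so the last region
    // sorts after every region that carries an explicit end key.
    System.out.println(Bytes.compareTo(middle, last) < 0);  // expected: true
  }
}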
@@ -1896,12 +1898,21 @@ public class KeyValue implements Writable, HeapSize { // Rows look like this: .META.,ROW_FROM_META,RID // LOG.info("ROOT " + Bytes.toString(left, loffset, llength) + // "---" + Bytes.toString(right, roffset, rlength)); - final int metalength = 7; // '.META.' length + final int metalength = UUIDMetaTableKey.length() + 2; // uuidof('.META.') + end of table + comma length int lmetaOffsetPlusDelimiter = loffset + metalength; + int rmetaOffsetPlusDelimiter = roffset + metalength; + int result; + + if (rlength > 0 && llength > 0) { + result = Bytes.compareTo(left, lmetaOffsetPlusDelimiter - 2, 1, right, rmetaOffsetPlusDelimiter - 2, 1); + //Compare the end of table marker in root + if (result != 0) { + return result; + } + } int leftFarDelimiter = getDelimiterInReverse(left, lmetaOffsetPlusDelimiter, llength - metalength, HRegionInfo.DELIMITER); - int rmetaOffsetPlusDelimiter = roffset + metalength; int rightFarDelimiter = getDelimiterInReverse(right, rmetaOffsetPlusDelimiter, rlength - metalength, HRegionInfo.DELIMITER); @@ -1913,7 +1924,7 @@ public class KeyValue implements Writable, HeapSize { } else if (leftFarDelimiter < 0 && rightFarDelimiter < 0) { return 0; } - int result = super.compareRows(left, lmetaOffsetPlusDelimiter, + result = super.compareRows(left, lmetaOffsetPlusDelimiter, leftFarDelimiter - lmetaOffsetPlusDelimiter, right, rmetaOffsetPlusDelimiter, rightFarDelimiter - rmetaOffsetPlusDelimiter); @@ -1976,11 +1987,15 @@ public class KeyValue implements Writable, HeapSize { // Move past delimiter leftDelimiter++; rightDelimiter++; - int leftFarDelimiter = getRequiredDelimiterInReverse(left, leftDelimiter, - llength - (leftDelimiter - loffset), HRegionInfo.DELIMITER); + int leftFarDelimiter = getRequiredDelimiterInReverse(left, + leftDelimiter, + llength - (leftDelimiter - loffset), + HRegionInfo.DELIMITER); + int rightFarDelimiter = getRequiredDelimiterInReverse(right, - rightDelimiter, rlength - (rightDelimiter - roffset), - HRegionInfo.DELIMITER); + rightDelimiter, + rlength - (rightDelimiter - roffset), + HRegionInfo.DELIMITER); // Now compare middlesection of row. result = super.compareRows(left, leftDelimiter, leftFarDelimiter - leftDelimiter, right, rightDelimiter, diff --git src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java index e5e60a8..705a743 100644 --- src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java +++ src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java @@ -32,11 +32,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; @@ -443,7 +439,8 @@ public class MetaReader { this.results.add(this.current); } }; - fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableNameBytes)); + fullScan(catalogTracker, visitor, + MetaSearchRow.getStartRow(tableName.getBytes(), HConstants.EMPTY_BYTE_ARRAY)); // If visitor has results >= 1 then table exists. 
return visitor.getResults().size() >= 1; } @@ -502,18 +499,7 @@ public class MetaReader { return Bytes.equals(tableName, current.getTableName()); } - /** - * @param tableName - * @return Place to start Scan in .META. when passed a - * tableName; returns <tableName&rt; <,&rt; <,&rt; - */ - static byte [] getTableStartRowForMeta(final byte [] tableName) { - byte [] startRow = new byte[tableName.length + 2]; - System.arraycopy(tableName, 0, startRow, 0, tableName.length); - startRow[startRow.length - 2] = HRegionInfo.DELIMITER; - startRow[startRow.length - 1] = HRegionInfo.DELIMITER; - return startRow; - } + /** * This method creates a Scan object that will only scan catalog rows that @@ -527,12 +513,11 @@ public class MetaReader { public static Scan getScanForTableName(byte[] tableName) { String strName = Bytes.toString(tableName); // Start key is just the table name with delimiters - byte[] startKey = Bytes.toBytes(strName + ",,"); + byte[] startKey = MetaSearchRow.getStartRow(tableName, HConstants.EMPTY_BYTE_ARRAY); // Stop key appends the smallest possible char to the table name - byte[] stopKey = Bytes.toBytes(strName + " ,,"); + byte[] stopKey = MetaSearchRow.getStopRow(tableName); - Scan scan = new Scan(startKey); - scan.setStopRow(stopKey); + Scan scan = new Scan(startKey, stopKey); return scan; } @@ -597,7 +582,7 @@ public class MetaReader { this.results.add(this.current); } }; - fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableName), + fullScan(catalogTracker, visitor, MetaSearchRow.getStartRow(tableName, HConstants.EMPTY_BYTE_ARRAY), Bytes.equals(tableName, HConstants.META_TABLE_NAME)); return visitor.getResults(); } diff --git src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 6bff130..834ed5f 100644 --- src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -482,7 +482,7 @@ public class HBaseAdmin implements Abortable, Closeable { public void deleteTable(final byte [] tableName) throws IOException { isMasterRunning(); HTableDescriptor.isLegalTableName(tableName); - HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName); + HRegionLocation firstMetaServer = getLastMetaServerForTable(tableName); try { getMaster().deleteTable(tableName); } catch (RemoteException e) { @@ -1507,10 +1507,10 @@ public class HBaseAdmin implements Abortable, Closeable { return getMaster().getClusterStatus(); } - private HRegionLocation getFirstMetaServerForTable(final byte [] tableName) + private HRegionLocation getLastMetaServerForTable(final byte[] tableName) throws IOException { return connection.locateRegion(HConstants.META_TABLE_NAME, - HRegionInfo.createRegionName(tableName, null, HConstants.NINES, false)); + HRegionInfo.createRegionName(tableName, null, null, HConstants.NINES.getBytes(), false)); } /** diff --git src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index 6f19d21..5e1ed28 100644 --- src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -810,8 +810,13 @@ public class HConnectionManager { return null; } } else if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { - return locateRegionInMeta(HConstants.ROOT_TABLE_NAME, tableName, row, - useCache, metaRegionLock); + + //HARD CODED TO POINT TO THE FIRST META TABLE + return 
locateRegionInMeta(HConstants.ROOT_TABLE_NAME, + HConstants.META_TABLE_NAME, + HConstants.EMPTY_BYTE_ARRAY, + useCache, + metaRegionLock); } else { // Region not in the cache - have to go to the meta RS return locateRegionInMeta(HConstants.META_TABLE_NAME, tableName, row, @@ -879,9 +884,51 @@ public class HConnectionManager { } } + + private HRegionInfo resultToHRegionInfo(final Result result, + byte[] tableName, + byte[] parentTable) + throws IOException { + + if (result == null) { + throw new TableNotFoundException(Bytes.toString(tableName)); + } + + byte[] value = result.getValue(HConstants.CATALOG_FAMILY, + HConstants.REGIONINFO_QUALIFIER); + + if (value == null || value.length == 0) { + throw new IOException("HRegionInfo was null or empty in " + + Bytes.toString(parentTable) + + ", row=" + + result); + } + + HRegionInfo regionInfo = Writables.getHRegionInfo(value); + + // possible we got a region of a different table... + + if (!Bytes.equals(regionInfo.getTableName(), tableName)) { + String errorMsg = "Table '" + Bytes.toString(tableName) + "' was not found, got: "; + if (regionInfo != null) { + errorMsg += Bytes.toString(regionInfo.getTableName()) + "."; + } + throw new TableNotFoundException(errorMsg); + } + + return regionInfo; + } + private boolean finishedScanningForRegion(HRegionInfo regionInfo) { + if (regionInfo == null || regionInfo.isOffline() || regionInfo.isSplit()) { + return false; + } + return true; + } + /* * Search one of the meta tables (-ROOT- or .META.) for the HRegionLocation - * info that contains the table and row we're seeking. + * info that contains the table and row we're seeking. If the row is null or 0 length + * then return a key which scans to the first meta key for the table. */ private HRegionLocation locateRegionInMeta(final byte [] parentTable, final byte [] tableName, final byte [] row, boolean useCache, @@ -897,17 +944,13 @@ public class HConnectionManager { } } - // build the key of the meta region we should be looking for. - // the extra 9's on the end are necessary to allow "exact" matches - // without knowing the precise region names. - byte [] metaKey = HRegionInfo.createRegionName(tableName, row, - HConstants.NINES, false); + final byte [] metaKey = MetaSearchRow.getStartRow(tableName, row); for (int tries = 0; true; tries++) { if (tries >= numRetries) { throw new NoServerForRegionException("Unable to find region for " + Bytes.toStringBinary(row) + " after " + numRetries + " tries."); } - + Result regionInfoRow = null; HRegionLocation metaLocation = null; try { // locate the root or meta region @@ -917,10 +960,11 @@ public class HConnectionManager { HRegionInterface server = getHRegionConnection(metaLocation.getHostname(), metaLocation.getPort()); - Result regionInfoRow = null; // This block guards against two threads trying to load the meta // region at the same time. The first will load the meta region and // the second will use the value that the first one found. + + HRegionInfo regionInfo = null; synchronized (regionLockObject) { // If the parent table is META, we may want to pre-fetch some // region info into the global region cache for this table. 
@@ -942,46 +986,50 @@ public class HConnectionManager { deleteCachedLocation(tableName, row); } - // Query the root or meta region for the location of the meta region - regionInfoRow = server.getClosestRowBefore( - metaLocation.getRegionInfo().getRegionName(), metaKey, - HConstants.CATALOG_FAMILY); - } - if (regionInfoRow == null) { - throw new TableNotFoundException(Bytes.toString(tableName)); - } - byte [] value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER); - if (value == null || value.length == 0) { - throw new IOException("HRegionInfo was null or empty in " + - Bytes.toString(parentTable) + ", row=" + regionInfoRow); - } - // convert the row result into the HRegionLocation we need! - HRegionInfo regionInfo = (HRegionInfo) Writables.getWritable( - value, new HRegionInfo()); - // possible we got a region of a different table... - if (!Bytes.equals(regionInfo.getTableName(), tableName)) { - throw new TableNotFoundException( - "Table '" + Bytes.toString(tableName) + "' was not found, got: " + - Bytes.toString(regionInfo.getTableName()) + "."); + byte[] stopRow = MetaSearchRow.getStopRow(tableName); + Scan scan = new Scan(metaKey, stopRow).addFamily(HConstants.CATALOG_FAMILY); + long scannerId = server.openScanner(metaLocation.getRegionInfo().getRegionName(), scan); + + // We always try to get two rows just in case one of them is a split. + Result result = server.next(scannerId); + + // We haven't cleared the meta entry out of the table yet + if (result == null) { + throw new TableNotFoundException("Table '" + Bytes.toString(tableName) + + " we searched for the StartKey: " + Bytes.toString(metaKey) + + " startKey lastChar's int value: " + (int) metaKey[metaKey.length -3] + + " with the stopKey: " + Bytes.toString(stopRow) + + " stopRow lastChar's int value: " + (int) stopRow[stopRow.length -3] + + " with parentTable:" + Bytes.toString(parentTable)); + } else { + regionInfoRow = result; + regionInfo = resultToHRegionInfo(result, tableName, parentTable); + } + } + + if (regionInfo == null) { + throw new TableNotFoundException("Table '" + + Bytes.toString(tableName) + "' was not found, got: " + + Bytes.toString(regionInfo.getTableName()) + "."); } + if (regionInfo.isSplit()) { throw new RegionOfflineException("the only available region for" + - " the required row is a split parent," + - " the daughters should be online soon: " + - regionInfo.getRegionNameAsString()); + " the required row is a split parent," + + " the daughters should be online soon: " + + regionInfo.getRegionNameAsString()); } if (regionInfo.isOffline()) { throw new RegionOfflineException("the region is offline, could" + - " be caused by a disable table call: " + - regionInfo.getRegionNameAsString()); + " be caused by a disable table call: " + + regionInfo.getRegionNameAsString()); } - value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY, - HConstants.SERVER_QUALIFIER); String hostAndPort = ""; - if (value != null) { - hostAndPort = Bytes.toString(value); + if (regionInfoRow.getValue(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER) != null) { + hostAndPort = Bytes.toString(regionInfoRow.getValue(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER)); } if (hostAndPort.equals("")) { throw new NoServerForRegionException("No server address listed " + diff --git src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java index 4135e55..6e82db4 100644 --- 
src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -23,21 +23,16 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Map; import java.util.NavigableMap; import java.util.TreeMap; -import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable; -import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Writables; @@ -142,39 +137,17 @@ public class MetaScanner { // if row is not null, we want to use the startKey of the row's region as // the startRow for the meta scan. - byte[] startRow; - if (row != null) { - // Scan starting at a particular row in a particular table - assert tableName != null; - byte[] searchRow = - HRegionInfo.createRegionName(tableName, row, HConstants.NINES, - false); - HTable metaTable = new HTable(configuration, HConstants.META_TABLE_NAME); - Result startRowResult = metaTable.getRowOrBefore(searchRow, - HConstants.CATALOG_FAMILY); - if (startRowResult == null) { - throw new TableNotFoundException("Cannot find row in .META. for table: " - + Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow)); - } - byte[] value = startRowResult.getValue(HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER); - if (value == null || value.length == 0) { - throw new IOException("HRegionInfo was null or empty in Meta for " + - Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow)); - } - HRegionInfo regionInfo = Writables.getHRegionInfo(value); + byte[] startRow; + byte[] stopRow; - byte[] rowBefore = regionInfo.getStartKey(); - startRow = HRegionInfo.createRegionName(tableName, rowBefore, - HConstants.ZEROES, false); - } else if (tableName == null || tableName.length == 0) { + if (tableName == null || tableName.length == 0) { // Full META scan startRow = HConstants.EMPTY_START_ROW; + stopRow = null; } else { - // Scan META for an entire table - startRow = HRegionInfo.createRegionName( - tableName, HConstants.EMPTY_START_ROW, HConstants.ZEROES, false); + startRow = MetaSearchRow.getStartRow(tableName, row); + stopRow = MetaSearchRow.getStopRow(tableName); } // Scan over each meta region @@ -182,8 +155,15 @@ public class MetaScanner { int rows = Math.min(rowLimit, configuration.getInt( HConstants.HBASE_META_SCANNER_CACHING, HConstants.DEFAULT_HBASE_META_SCANNER_CACHING)); + do { - final Scan scan = new Scan(startRow).addFamily(HConstants.CATALOG_FAMILY); + Scan scan; + if (stopRow != null) { // Support full meta scans + scan = new Scan(startRow, stopRow).addFamily(HConstants.CATALOG_FAMILY); + } else { + scan = new Scan(startRow).addFamily(HConstants.CATALOG_FAMILY); + } + if (LOG.isDebugEnabled()) { LOG.debug("Scanning " + Bytes.toString(metaTableName) + " starting at row=" + Bytes.toStringBinary(startRow) + " for max=" + diff --git src/main/java/org/apache/hadoop/hbase/client/MetaSearchRow.java src/main/java/org/apache/hadoop/hbase/client/MetaSearchRow.java new file mode 100644 index 0000000..e096d0c --- /dev/null +++ 
src/main/java/org/apache/hadoop/hbase/client/MetaSearchRow.java @@ -0,0 +1,62 @@ +/** + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; + +public class MetaSearchRow { + static final Log LOG = LogFactory.getLog(MetaSearchRow.class); + + public static byte[] getStopRow(final byte[] tableName){ + byte [] b = new byte [tableName.length + 3 + HConstants.NINES.getBytes().length ]; + + + int offset = tableName.length; + System.arraycopy(tableName, 0, b, 0, offset); + b[offset++] = (byte) (HRegionInfo.END_OF_TABLE_NAME_FOR_EMPTY_ENDKEY + 1); + b[offset++] = HRegionInfo.DELIMITER; + b[offset++] = HRegionInfo.DELIMITER; + System.arraycopy(HConstants.NINES.getBytes(), 0, b, offset, HConstants.NINES.getBytes().length); + return b; + } + + public static byte[] getStartRow(final byte[] tableName, final byte[] searchRow) { + // Get first region in META + + if (searchRow == null || searchRow.length == 0){ + byte[] startRow = new byte[tableName.length + 3 + HConstants.ZEROES.getBytes().length]; + System.arraycopy(tableName, 0, startRow, 0, tableName.length); + startRow[tableName.length] = HRegionInfo.END_OF_TABLE_NAME - 1 ; + startRow[tableName.length+1] = HRegionInfo.DELIMITER; + startRow[tableName.length+2] = HRegionInfo.DELIMITER; + System.arraycopy(HConstants.ZEROES.getBytes(), 0, startRow, tableName.length + 3, HConstants.ZEROES.getBytes().length); + return startRow; + } + + return HRegionInfo.createRegionName(tableName, + null, + searchRow, + HConstants.NINES.getBytes(), + false); + } +} diff --git src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java index bf85bc1..be5db4c 100644 --- src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java +++ src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java @@ -21,8 +21,8 @@ package org.apache.hadoop.hbase.rest; import java.io.IOException; -import java.net.InetSocketAddress; import java.util.Map; +import java.util.NavigableMap; import javax.ws.rs.GET; import javax.ws.rs.Produces; @@ -37,7 +37,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.HTableInterface; @@ -67,12 +67,12 @@ public class RegionsResource extends ResourceBase { 
this.tableResource = tableResource; } - private Map getTableRegions() + private NavigableMap getTableRegions() throws IOException { HTablePool pool = servlet.getTablePool(); HTableInterface table = pool.getTable(tableResource.getName()); try { - return ((HTable)table).getRegionsInfo(); + return ((HTable)table).getRegionLocations(); } finally { pool.putTable(table); } @@ -88,15 +88,14 @@ public class RegionsResource extends ResourceBase { try { String tableName = tableResource.getName(); TableInfoModel model = new TableInfoModel(tableName); - Map regions = getTableRegions(); - for (Map.Entry e: regions.entrySet()) { + NavigableMap regions = getTableRegions(); + for (Map.Entry e: regions.entrySet()) { HRegionInfo hri = e.getKey(); - HServerAddress addr = e.getValue(); - InetSocketAddress sa = addr.getInetSocketAddress(); + ServerName addr = e.getValue(); model.add( new TableRegionModel(tableName, hri.getRegionId(), hri.getStartKey(), hri.getEndKey(), - sa.getHostName() + ":" + Integer.valueOf(sa.getPort()))); + addr.getHostname() + ":" + Integer.valueOf(addr.getPort()))); } ResponseBuilder response = Response.ok(model); response.cacheControl(cacheControl); diff --git src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java index 67e7a04..17f699f 100644 --- src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java +++ src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java @@ -66,8 +66,7 @@ public class TableRegionModel implements Serializable { * @param startKey the start key of the region * @param endKey the end key of the region */ - public TableRegionModel(String table, long id, byte[] startKey, - byte[] endKey) { + public TableRegionModel(String table, long id, byte[] startKey, byte[] endKey) { this(table, id, startKey, endKey, null); } @@ -79,8 +78,7 @@ public class TableRegionModel implements Serializable { * @param endKey the end key of the region * @param location the name and port of the region server hosting the region */ - public TableRegionModel(String table, long id, byte[] startKey, - byte[] endKey, String location) { + public TableRegionModel(String table, long id, byte[] startKey, byte[] endKey, String location) { this.table = table; this.id = id; this.startKey = startKey; @@ -95,9 +93,11 @@ public class TableRegionModel implements Serializable { public String getName() { byte [] tableNameAsBytes = Bytes.toBytes(this.table); byte [] nameAsBytes = HRegionInfo.createRegionName(tableNameAsBytes, - this.startKey, this.id, - !HTableDescriptor.isMetaTable(tableNameAsBytes)); - return Bytes.toString(nameAsBytes); + this.startKey, + this.endKey, + Long.toString(this.id).getBytes(), + !HTableDescriptor.isMetaTable(tableNameAsBytes)); + return Bytes.toStringBinary(nameAsBytes); } /** @@ -109,6 +109,14 @@ public class TableRegionModel implements Serializable { } /** + * @return the table name + */ + @XmlAttribute + public String getTable() { + return table; + } + + /** * @return the start key */ @XmlAttribute @@ -133,15 +141,10 @@ public class TableRegionModel implements Serializable { } /** - * @param name region printable name + * @param table the table name */ - public void setName(String name) { - String split[] = name.split(","); - this.table = split[0]; - this.startKey = Bytes.toBytes(split[1]); - String tail = split[2]; - split = tail.split("\\."); - id = Long.valueOf(split[0]); + public void setTable(String table) { + this.table = table; } /** @@ -185,6 +188,8 @@ 
public class TableRegionModel implements Serializable { sb.append(Bytes.toString(startKey)); sb.append("'\n endKey='"); sb.append(Bytes.toString(endKey)); + sb.append("'\n table='"); + sb.append(table); if (location != null) { sb.append("'\n location='"); sb.append(location); diff --git src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 6fca020..0512d70 100644 --- src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -47,15 +47,7 @@ import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.io.hfile.Compression; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -67,7 +59,6 @@ import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.Keying; import org.apache.hadoop.hbase.util.RegionSplitter; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Writables; @@ -1160,15 +1151,14 @@ public class HBaseTestingUtility { // TODO: Redo using MetaReader. 
HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME); List rows = new ArrayList(); - ResultScanner s = t.getScanner(new Scan()); + byte[] startRow = MetaSearchRow.getStartRow(tableName, HConstants.EMPTY_BYTE_ARRAY); + byte[] stopRow = MetaSearchRow.getStopRow(tableName); + Scan scan = new Scan(startRow,stopRow).addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + ResultScanner s = t.getScanner(scan); for (Result result : s) { - HRegionInfo info = Writables.getHRegionInfo( - result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER)); - if (Bytes.compareTo(info.getTableName(), tableName) == 0) { - LOG.info("getMetaTableRows: row -> " + - Bytes.toStringBinary(result.getRow())); - rows.add(result.getRow()); - } + LOG.info("getMetaTableRows: row -> " + + Bytes.toStringBinary(result.getRow())); + rows.add(result.getRow()); } s.close(); t.close(); diff --git src/test/java/org/apache/hadoop/hbase/TestKeyValue.java src/test/java/org/apache/hadoop/hbase/TestKeyValue.java index dc4ee8d..e672763 100644 --- src/test/java/org/apache/hadoop/hbase/TestKeyValue.java +++ src/test/java/org/apache/hadoop/hbase/TestKeyValue.java @@ -20,16 +20,21 @@ package org.apache.hadoop.hbase; import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; import java.util.Set; import java.util.TreeSet; import junit.framework.TestCase; +import org.apache.commons.collections.iterators.EmptyMapIterator; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValue.MetaComparator; import org.apache.hadoop.hbase.KeyValue.Type; +import org.apache.hadoop.hbase.client.MetaSearchRow; +import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.util.Bytes; import org.junit.experimental.categories.Category; @@ -52,8 +57,8 @@ public class TestKeyValue extends TestCase { assertTrue(aaa.matchingColumn(family2,qualifier2)); byte [] nullQualifier = new byte[0]; aaa = new KeyValue(a, family1, nullQualifier, 0L, Type.Put, a); - assertTrue(aaa.matchingColumn(family1,null)); - assertFalse(aaa.matchingColumn(family2,qualifier2)); + assertTrue(aaa.matchingColumn(family1, null)); + assertFalse(aaa.matchingColumn(family2, qualifier2)); } public void testBasics() throws Exception { @@ -75,6 +80,21 @@ public class TestKeyValue extends TestCase { LOG.info(kv.toString()); } + public void testTroublesomeRow() { + long now = System.currentTimeMillis(); + + final byte[] a = HRegionInfo.createRegionName(Bytes.toBytes("foo"), + Bytes.toBytes("foo\u0000"), + 0, + true); + final byte[] b = HRegionInfo.createRegionName(Bytes.toBytes("foo"), + Bytes.toBytes("foo"), + 0, + true); + assertTrue(KeyValue.META_COMPARATOR.compare(new KeyValue(a, now), new KeyValue(b, now)) > 0); + + } + public void testPlainCompare() throws Exception { final byte [] a = Bytes.toBytes("aaa"); final byte [] b = Bytes.toBytes("bbb"); @@ -115,34 +135,70 @@ public class TestKeyValue extends TestCase { public void testMoreComparisons() throws Exception { // Root compares + + byte[] endRow = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + HConstants.EMPTY_BYTE_ARRAY, + 99999999999999l, + false); + + byte[] startRow = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + HConstants.EMPTY_BYTE_ARRAY, + 1l, + false); + long now = System.currentTimeMillis(); - KeyValue a = new KeyValue(Bytes.toBytes(".META.,,99999999999999"), now); - KeyValue b = new 
KeyValue(Bytes.toBytes(".META.,,1"), now); + KeyValue a = new KeyValue(endRow, now); + KeyValue b = new KeyValue(startRow, now); KVComparator c = new KeyValue.RootComparator(); assertTrue(c.compare(b, a) < 0); - KeyValue aa = new KeyValue(Bytes.toBytes(".META.,,1"), now); - KeyValue bb = new KeyValue(Bytes.toBytes(".META.,,1"), - Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1235943454602L, - (byte[])null); + KeyValue aa = new KeyValue(startRow, now); + KeyValue bb = new KeyValue(startRow, + Bytes.toBytes("info"), + Bytes.toBytes("regioninfo"), + 1235943454602L, + (byte[])null); assertTrue(c.compare(aa, bb) < 0); - // Meta compares - KeyValue aaa = new KeyValue( - Bytes.toBytes("TestScanMultipleVersions,row_0500,1236020145502"), now); - KeyValue bbb = new KeyValue( - Bytes.toBytes("TestScanMultipleVersions,,99999999999999"), now); - c = new KeyValue.MetaComparator(); - assertTrue(c.compare(bbb, aaa) < 0); + // Meta compares + byte [] testScanMultipleVersion = Bytes.toBytes("TestScanMultipleVersions"); +// Bytes.toBytes("TestScanMultipleVersions,row_0500,1236020145502"), now); +// Bytes.toBytes("TestScanMultipleVersions,,99999999999999"), now); - KeyValue aaaa = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,,1236023996656"), - Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1236024396271L, - (byte[])null); + byte[] tsmRegion = HRegionInfo.createRegionName(testScanMultipleVersion, Bytes.toBytes("row_0500"), 1236020145502l, false); + startRow = HRegionInfo.createRegionName(testScanMultipleVersion, HConstants.EMPTY_BYTE_ARRAY, 99999999999999l, false); + + KeyValue aaa = new KeyValue(tsmRegion, now); + KeyValue bbb = new KeyValue(startRow, now); + c = new KeyValue.MetaComparator(); + assertTrue(c.compare(bbb, aaa) > 0); + + // Meta compares + + + //Bytes.toBytes("TestScanMultipleVersions"),,1236023996656") + byte[] tscmvRegionEmpty = HRegionInfo.createRegionName(testScanMultipleVersion, + HConstants.EMPTY_BYTE_ARRAY, + 1236023996656l, + true); + + //"TestScanMultipleVersions,row_0500,1236034574162" + byte[] tscmvRegion500 = HRegionInfo.createRegionName(testScanMultipleVersion, + Bytes.toBytes("row_0500"), + 1236023996656l, + true); + KeyValue aaaa = new KeyValue(tscmvRegionEmpty, + Bytes.toBytes("info"), + Bytes.toBytes("regioninfo"), + 1236024396271L, + (byte[])null); assertTrue(c.compare(aaaa, bbb) < 0); - KeyValue x = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,row_0500,1236034574162"), - Bytes.toBytes("info"), Bytes.toBytes(""), 9223372036854775807L, - (byte[])null); - KeyValue y = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,row_0500,1236034574162"), + KeyValue x = new KeyValue(tscmvRegion500, + Bytes.toBytes("info"), + Bytes.toBytes(""), + 9223372036854775807L, + (byte[])null); + KeyValue y = new KeyValue(tscmvRegion500, Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1236034574912L, (byte[])null); assertTrue(c.compare(x, y) < 0); @@ -187,74 +243,141 @@ public class TestKeyValue extends TestCase { public void testKeyValueBorderCases() throws IOException { // % sorts before , so if we don't do special comparator, rowB would // come before rowA. 
- KeyValue rowA = new KeyValue(Bytes.toBytes("testtable,www.hbase.org/,1234"), - Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); - KeyValue rowB = new KeyValue(Bytes.toBytes("testtable,www.hbase.org/%20,99999"), - Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); + + byte[] metaRowA = HRegionInfo.createRegionName(Bytes.toBytes("testtable"), + Bytes.toBytes("www.hbase.org/"), + 1234, + false); + byte[] metaRowB = HRegionInfo.createRegionName(Bytes.toBytes("testtable"), + Bytes.toBytes("www.hbase.org/%20"), + 99999, + false); + + + KeyValue rowA = new KeyValue(metaRowA, Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); + KeyValue rowB = new KeyValue(metaRowB, Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) < 0); - rowA = new KeyValue(Bytes.toBytes("testtable,,1234"), Bytes.toBytes("fam"), + + metaRowA = HRegionInfo.createRegionName(Bytes.toBytes("testtable"), + HConstants.EMPTY_BYTE_ARRAY, + 1234, + true); + + metaRowB = HRegionInfo.createRegionName(Bytes.toBytes("testtable"), + Bytes.toBytes("www.hbase.org/%20"), + 99999, + true); + rowA = new KeyValue(metaRowA, Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); - rowB = new KeyValue(Bytes.toBytes("testtable,$www.hbase.org/,99999"), + rowB = new KeyValue(metaRowB, Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); - assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) < 0); + assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) > 0); + + metaRowA = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + Bytes.toBytes("testtable,www.hbase.org/,1234"), + 1234, + true); - rowA = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/,1234,4321"), + metaRowB = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + Bytes.toBytes("testtable,www.hbase.org/%20,99999"), + 99999, + true); + + rowA = new KeyValue(metaRowA, Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); - rowB = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/%20,99999,99999"), + rowB = new KeyValue(metaRowB, Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); assertTrue(KeyValue.ROOT_COMPARATOR.compare(rowA, rowB) < 0); } private void metacomparisons(final KeyValue.MetaComparator c) { long now = System.currentTimeMillis(); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now), - new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now)) == 0); - KeyValue a = new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now); - KeyValue b = new KeyValue(Bytes.toBytes(".META.,a,,0,2"), now); + + byte[] metaRowA = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + Bytes.toBytes("a,,0"), + 1, + true); + + byte[] metaRowB = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + Bytes.toBytes("a,,0"), + 2, + true); + + + assertTrue(c.compare(new KeyValue(metaRowA, now), + new KeyValue(metaRowA, now)) == 0); + KeyValue a = new KeyValue(metaRowA, now); + KeyValue b = new KeyValue(metaRowB, now); assertTrue(c.compare(a, b) < 0); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,a,,0,2"), now), - new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now)) > 0); + assertTrue(c.compare(new KeyValue(metaRowB, now), + new KeyValue(metaRowA, now)) > 0); } private void comparisons(final KeyValue.KVComparator c) { long now = System.currentTimeMillis(); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,1"), now), - new 
KeyValue(Bytes.toBytes(".META.,,1"), now)) == 0); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,1"), now), - new KeyValue(Bytes.toBytes(".META.,,2"), now)) < 0); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,2"), now), - new KeyValue(Bytes.toBytes(".META.,,1"), now)) > 0); + byte[] metaRowA = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + HConstants.EMPTY_BYTE_ARRAY, + 1, + true); + byte[] metaRowB = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + HConstants.EMPTY_BYTE_ARRAY, + 2, + true); + + assertTrue(c.compare(new KeyValue(metaRowA, now), + new KeyValue(metaRowA, now)) == 0); + assertTrue(c.compare(new KeyValue(metaRowA, now), + new KeyValue(metaRowB, now)) < 0); + assertTrue(c.compare(new KeyValue(metaRowB, now), + new KeyValue(metaRowA, now)) > 0); } public void testBinaryKeys() throws Exception { - Set set = new TreeSet(KeyValue.COMPARATOR); + Set set; final byte [] fam = Bytes.toBytes("col"); final byte [] qf = Bytes.toBytes("umn"); final byte [] nb = new byte[0]; - KeyValue [] keys = {new KeyValue(Bytes.toBytes("aaaaa,\u0000\u0000,2"), fam, qf, 2, nb), - new KeyValue(Bytes.toBytes("aaaaa,\u0001,3"), fam, qf, 3, nb), - new KeyValue(Bytes.toBytes("aaaaa,,1"), fam, qf, 1, nb), - new KeyValue(Bytes.toBytes("aaaaa,\u1000,5"), fam, qf, 5, nb), - new KeyValue(Bytes.toBytes("aaaaa,a,4"), fam, qf, 4, nb), - new KeyValue(Bytes.toBytes("a,a,0"), fam, qf, 0, nb), + + byte[] metaRow0 = HRegionInfo.createRegionName(Bytes.toBytes("a"), + Bytes.toBytes("a"), + 0, + false); + + byte[] metaRow1 = HRegionInfo.createRegionName(Bytes.toBytes("aaaaa"), + Bytes.toBytes("\u0000\u0000"), + 1, + false); + + byte[] metaRow2 = HRegionInfo.createRegionName(Bytes.toBytes("aaaaa"), + Bytes.toBytes(","), + 2, + false); + + byte[] metaRow3 = HRegionInfo.createRegionName(Bytes.toBytes("aaaaa"), + Bytes.toBytes("a"), + 3, + false); + + byte[] metaRow4 = HRegionInfo.createRegionName(Bytes.toBytes("aaaaa"), + Bytes.toBytes("\u1000"), + 4, + false); + + byte[] metaRow5 = HRegionInfo.createRegionName(Bytes.toBytes("aaaaa"), + HConstants.EMPTY_BYTE_ARRAY, + 5, + false); + + KeyValue [] keys = { + new KeyValue(metaRow2, fam, qf, 2, nb), + new KeyValue(metaRow3, fam, qf, 3, nb), + new KeyValue(metaRow1, fam, qf, 1, nb), + new KeyValue(metaRow5, fam, qf, 5, nb), + new KeyValue(metaRow4, fam, qf, 4, nb), + new KeyValue(metaRow0, fam, qf, 0, nb), }; - // Add to set with bad comparator - for (int i = 0; i < keys.length; i++) { - set.add(keys[i]); - } - // This will output the keys incorrectly. - boolean assertion = false; - int count = 0; - try { - for (KeyValue k: set) { - assertTrue(count++ == k.getTimestamp()); - } - } catch (junit.framework.AssertionFailedError e) { - // Expected - assertion = true; - } - assertTrue(assertion); + int count; // Make set with good comparator set = new TreeSet(new KeyValue.MetaComparator()); for (int i = 0; i < keys.length; i++) { @@ -265,21 +388,51 @@ public class TestKeyValue extends TestCase { assertTrue(count++ == k.getTimestamp()); } // Make up -ROOT- table keys. 
- KeyValue [] rootKeys = { - new KeyValue(Bytes.toBytes(".META.,aaaaa,\u0000\u0000,0,2"), fam, qf, 2, nb), - new KeyValue(Bytes.toBytes(".META.,aaaaa,\u0001,0,3"), fam, qf, 3, nb), - new KeyValue(Bytes.toBytes(".META.,aaaaa,,0,1"), fam, qf, 1, nb), - new KeyValue(Bytes.toBytes(".META.,aaaaa,\u1000,0,5"), fam, qf, 5, nb), - new KeyValue(Bytes.toBytes(".META.,aaaaa,a,0,4"), fam, qf, 4, nb), - new KeyValue(Bytes.toBytes(".META.,,0"), fam, qf, 0, nb), - }; + byte[] metaTable = Bytes.toBytes(".META"); + metaRow1 = HRegionInfo.createRegionName(metaTable, + Bytes.toBytes("aaaaa,\u0000\u0000,0"), + 1, + true); + + metaRow2 = HRegionInfo.createRegionName(metaTable, + Bytes.toBytes("aaaaa,\u0001,0"), + 2, + true); + metaRow0 = HRegionInfo.createRegionName(metaTable, + Bytes.toBytes("aaaaa,,0"), + 0, + true); + + metaRow4 = HRegionInfo.createRegionName(metaTable, + Bytes.toBytes("aaaaa,\u1000,0"), + 4, + true); + + metaRow3 = HRegionInfo.createRegionName(metaTable, + Bytes.toBytes("aaaaa,a,0"), + 3, + true); + metaRow5 = HRegionInfo.createRegionName(metaTable, + HConstants.EMPTY_BYTE_ARRAY, + 5, + true); + + + KeyValue[] rootKeys = { + new KeyValue(metaRow2, fam, qf, 2, nb), + new KeyValue(metaRow3, fam, qf, 3, nb), + new KeyValue(metaRow1, fam, qf, 1, nb), + new KeyValue(metaRow5, fam, qf, 5, nb), + new KeyValue(metaRow4, fam, qf, 4, nb), + new KeyValue(metaRow0, fam, qf, 0, nb), + }; // This will output the keys incorrectly. set = new TreeSet(new KeyValue.MetaComparator()); // Add to set with bad comparator for (int i = 0; i < keys.length; i++) { set.add(rootKeys[i]); } - assertion = false; + boolean assertion = false; count = 0; try { for (KeyValue k: set) { diff --git src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index 95712dd..81e109e 100644 --- src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -26,13 +26,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -1171,7 +1165,6 @@ public class TestAdmin { public void testCloseRegionIfInvalidRegionNameIsPassed() throws Exception { byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegion1"); createTableWithDefaultConf(TABLENAME); - HRegionInfo info = null; HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME); List onlineRegions = rs.getOnlineRegions(); @@ -1199,10 +1192,9 @@ public class TestAdmin { List onlineRegions = rs.getOnlineRegions(); for (HRegionInfo regionInfo : onlineRegions) { if (!regionInfo.isMetaTable()) { - if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion2")) { info = regionInfo; - admin.closeRegion(regionInfo.getRegionNameAsString(), rs + admin.closeRegion(regionInfo.getRegionName(), rs .getServerName().getServerName()); } } @@ -1223,6 +1215,7 @@ public class TestAdmin { public void testCloseRegionWhenServerNameIsNull() throws Exception { byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegion3"); createTableWithDefaultConf(TABLENAME); + HBaseAdmin admin = createTable(TABLENAME); HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME); diff --git 
index 5f97167..f0f88a5 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -55,88 +55,6 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
 
   private static final byte[] T35 = Bytes.toBytes("035");
   private static final byte[] T40 = Bytes.toBytes("040");
-
-
-  public void testUsingMetaAndBinary() throws IOException {
-    FileSystem filesystem = FileSystem.get(conf);
-    Path rootdir = testDir;
-    // Up flush size else we bind up when we use default catalog flush of 16k.
-    HTableDescriptor.META_TABLEDESC.setMemStoreFlushSize(64 * 1024 * 1024);
-
-    HRegion mr = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
-      rootdir, this.conf, HTableDescriptor.META_TABLEDESC);
-    try {
-      // Write rows for three tables 'A', 'B', and 'C'.
-      for (char c = 'A'; c < 'D'; c++) {
-        HTableDescriptor htd = new HTableDescriptor("" + c);
-        final int last = 128;
-        final int interval = 2;
-        for (int i = 0; i <= last; i += interval) {
-          HRegionInfo hri = new HRegionInfo(htd.getName(),
-            i == 0? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i),
-            i == last? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i + interval));
-          Put put = new Put(hri.getRegionName());
-          put.setWriteToWAL(false);
-          put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
-            Writables.getBytes(hri));
-          mr.put(put, false);
-        }
-      }
-      InternalScanner s = mr.getScanner(new Scan());
-      try {
-        List keys = new ArrayList();
-        while(s.next(keys)) {
-          LOG.info(keys);
-          keys.clear();
-        }
-      } finally {
-        s.close();
-      }
-      findRow(mr, 'C', 44, 44);
-      findRow(mr, 'C', 45, 44);
-      findRow(mr, 'C', 46, 46);
-      findRow(mr, 'C', 43, 42);
-      mr.flushcache();
-      findRow(mr, 'C', 44, 44);
-      findRow(mr, 'C', 45, 44);
-      findRow(mr, 'C', 46, 46);
-      findRow(mr, 'C', 43, 42);
-      // Now delete 'C' and make sure I don't get entries from 'B'.
-      byte [] firstRowInC = HRegionInfo.createRegionName(Bytes.toBytes("" + 'C'),
-        HConstants.EMPTY_BYTE_ARRAY, HConstants.ZEROES, false);
-      Scan scan = new Scan(firstRowInC);
-      s = mr.getScanner(scan);
-      try {
-        List keys = new ArrayList();
-        while (s.next(keys)) {
-          mr.delete(new Delete(keys.get(0).getRow()), null, false);
-          keys.clear();
-        }
-      } finally {
-        s.close();
-      }
-      // Assert we get null back (pass -1).
-      findRow(mr, 'C', 44, -1);
-      findRow(mr, 'C', 45, -1);
-      findRow(mr, 'C', 46, -1);
-      findRow(mr, 'C', 43, -1);
-      mr.flushcache();
-      findRow(mr, 'C', 44, -1);
-      findRow(mr, 'C', 45, -1);
-      findRow(mr, 'C', 46, -1);
-      findRow(mr, 'C', 43, -1);
-    } finally {
-      if (mr != null) {
-        try {
-          mr.close();
-        } catch (Exception e) {
-          e.printStackTrace();
-        }
-        mr.getLog().closeAndDelete();
-      }
-    }
-  }
-
   /*
    * @param mr
    * @param table
@@ -152,7 +70,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
     // Find the row.
     byte [] tofindBytes = Bytes.toBytes((short)rowToFind);
     byte [] metaKey = HRegionInfo.createRegionName(tableb, tofindBytes,
-      HConstants.NINES, false);
+      99999999999999L, false);
     LOG.info("find=" + new String(metaKey));
     Result r = mr.getClosestRowBefore(metaKey);
     if (answer == -1) {
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
index 6e1211b..f507fa0 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
@@ -30,7 +30,6 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -41,23 +40,26 @@ public class TestHRegionInfo {
   public void testCreateHRegionInfoName() throws Exception {
     String tableName = "tablename";
     final byte [] tn = Bytes.toBytes(tableName);
-    String startKey = "startkey";
-    final byte [] sk = Bytes.toBytes(startKey);
+    String endKey = "endkey";
+    final byte [] ek = Bytes.toBytes(endKey);
     String id = "id";
 
     // old format region name
-    byte [] name = HRegionInfo.createRegionName(tn, sk, id, false);
+    byte [] name = HRegionInfo.createRegionName(tn, null, ek, id.getBytes(), false);
     String nameStr = Bytes.toString(name);
-    assertEquals(tableName + "," + startKey + "," + id, nameStr);
+    assertEquals(tableName + (char)HRegionInfo.END_OF_TABLE_NAME + (char)HRegionInfo.DELIMITER +
+      endKey + (char)HRegionInfo.DELIMITER + id, nameStr);
 
     // new format region name.
     String md5HashInHex = MD5Hash.getMD5AsHex(name);
     assertEquals(HRegionInfo.MD5_HEX_LENGTH, md5HashInHex.length());
-    name = HRegionInfo.createRegionName(tn, sk, id, true);
+    name = HRegionInfo.createRegionName(tn, null, ek, id.getBytes(), true);
     nameStr = Bytes.toString(name);
-    assertEquals(tableName + "," + startKey + ","
-      + id + "." + md5HashInHex + ".",
+    assertEquals(tableName + (char)HRegionInfo.END_OF_TABLE_NAME +
+      (char)HRegionInfo.DELIMITER + endKey +
+      (char)HRegionInfo.DELIMITER + id +
+      (char)HRegionInfo.ENC_SEPARATOR + md5HashInHex + (char)HRegionInfo.ENC_SEPARATOR,
       nameStr);
   }
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
index a092cf0..049a499 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
@@ -493,37 +493,6 @@ public class TestMemStore extends TestCase {
       m.kvset.size(), m.kvset.size() == 3);
   }
 
-  public void testBinary() throws IOException {
-    MemStore mc = new MemStore(new Configuration(), KeyValue.ROOT_COMPARATOR);
-    final int start = 43;
-    final int end = 46;
-    for (int k = start; k <= end; k++) {
-      byte [] kk = Bytes.toBytes(k);
-      byte [] row =
-        Bytes.toBytes(".META.,table," + Bytes.toString(kk) + ",1," + k);
-      KeyValue key = new KeyValue(row, CONTENTS, BASIC,
-        System.currentTimeMillis(),
-        (CONTENTSTR + k).getBytes(HConstants.UTF8_ENCODING));
-      mc.add(key);
-      System.out.println(key);
-//      key = new KeyValue(row, Bytes.toBytes(ANCHORNUM + k),
-//        System.currentTimeMillis(),
-//        (ANCHORSTR + k).getBytes(HConstants.UTF8_ENCODING));
-//      mc.add(key);
-//      System.out.println(key);
-    }
-    int index = start;
-    for (KeyValue kv: mc.kvset) {
-      System.out.println(kv);
-      byte [] b = kv.getRow();
-      // Hardcoded offsets into String
-      String str = Bytes.toString(b, 13, 4);
-      byte [] bb = Bytes.toBytes(index);
-      String bbStr = Bytes.toString(bb);
-      assertEquals(str, bbStr);
-      index++;
-    }
-  }
 
 //////////////////////////////////////////////////////////////////////////////
 // Get tests
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 1997abd..3946a35 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -198,9 +198,9 @@ public class TestSplitTransactionOnCluster {
       hri, new ServerName("any.old.server", 1234, -1));
     // Now try splitting.... should fail. And each should successfully
     // rollback.
-    this.admin.split(hri.getRegionNameAsString());
-    this.admin.split(hri.getRegionNameAsString());
-    this.admin.split(hri.getRegionNameAsString());
+    this.admin.split(hri.getRegionName());
+    this.admin.split(hri.getRegionName());
+    this.admin.split(hri.getRegionName());
     // Wait around a while and assert count of regions remains constant.
     for (int i = 0; i < 10; i++) {
       Thread.sleep(100);
@@ -356,7 +356,7 @@
   private void split(final HRegionInfo hri, final HRegionServer server,
       final int regionCount)
   throws IOException, InterruptedException {
-    this.admin.split(hri.getRegionNameAsString());
+    this.admin.split(hri.getRegionName());
     while (server.getOnlineRegions().size() <= regionCount) {
       LOG.debug("Waiting on region to split");
       Thread.sleep(100);
diff --git src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
index b6f0ab5..754c04b 100644
--- src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
+++ src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java
@@ -45,7 +45,8 @@ public class TestTableRegionModel extends TestCase {
     " endKey=\"enp5eng=\"" +
     " startKey=\"YWJyYWNhZGJyYQ==\"" +
     " id=\"8731042424\"" +
-    " name=\"testtable,abracadbra,8731042424\"/>";
+    " table=\"testtable\"" +
+    " name=\"testtable,zzyzx,8731042424\"/>";
 
   private JAXBContext context;
 
@@ -55,9 +56,7 @@
   }
 
   private TableRegionModel buildTestModel() {
-    TableRegionModel model =
-      new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION);
-    return model;
+    return new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION);
   }
 
   @SuppressWarnings("unused")
@@ -77,9 +76,12 @@
     assertTrue(Bytes.equals(model.getEndKey(), END_KEY));
     assertEquals(model.getId(), ID);
     assertEquals(model.getLocation(), LOCATION);
-    assertEquals(model.getName(),
-      TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID) +
-      ".ad9860f031282c46ed431d7af8f94aca.");
+    byte[] regionInfo = HRegionInfo.createRegionName(TABLE.getBytes(),
+                                                     START_KEY,
+                                                     END_KEY,
+                                                     Long.toString(ID).getBytes(),
+                                                     true);
+    assertEquals(model.getName(), Bytes.toStringBinary(regionInfo));
   }
 
   public void testBuildModel() throws Exception {
@@ -90,17 +92,13 @@
     TableRegionModel model = buildTestModel();
     String modelName = model.getName();
     HRegionInfo hri = new HRegionInfo(Bytes.toBytes(TABLE),
-      START_KEY, END_KEY, false, ID);
+      START_KEY,
+      END_KEY,
+      false,
+      ID);
     assertEquals(modelName, hri.getRegionNameAsString());
   }
 
-  public void testSetName() {
-    TableRegionModel model = buildTestModel();
-    String name = model.getName();
-    model.setName(name);
-    assertEquals(name, model.getName());
-  }
-
   public void testFromXML() throws Exception {
     checkModel(fromXML(AS_XML));
   }
-- 
1.7.4.4