diff --git src/main/java/org/apache/hadoop/hbase/HConstants.java src/main/java/org/apache/hadoop/hbase/HConstants.java index 3c83846..2604c76 100644 --- src/main/java/org/apache/hadoop/hbase/HConstants.java +++ src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -236,7 +236,7 @@ public final class HConstants { // Always store the location of the root table's HRegion. // This HRegion is never split. - // region name = table + startkey + regionid. This is the row key. + // region name = table + endkey + regionid. This is the row key. // each row in the root and meta tables describes exactly 1 region // Do we ever need to know all the information that we are storing? @@ -259,9 +259,6 @@ public final class HConstants { /** The META table's name. */ public static final byte [] META_TABLE_NAME = Bytes.toBytes(".META."); - /** delimiter used between portions of a region name */ - public static final int META_ROW_DELIMITER = ','; - /** The catalog family as a string*/ public static final String CATALOG_FAMILY_STR = "info"; diff --git src/main/java/org/apache/hadoop/hbase/HRegionInfo.java src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index 74cb821..1ba5640 100644 --- src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -23,7 +23,10 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.EOFException; import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; import java.util.Arrays; +import java.util.UUID; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -31,11 +34,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.catalog.CatalogTracker; +import org.apache.hadoop.hbase.catalog.MetaReader; + import org.apache.hadoop.hbase.migration.HRegionInfo090x; 
-import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSTableDescriptors; -import org.apache.hadoop.hbase.util.JenkinsHash; -import org.apache.hadoop.hbase.util.MD5Hash; +import org.apache.hadoop.hbase.util.*; import org.apache.hadoop.io.VersionedWritable; import org.apache.hadoop.io.WritableComparable; @@ -57,13 +60,13 @@ implements WritableComparable { * in the filesystem. * * New region name format: - * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. + * <tablename>,,<endkey>,<regionIdTimestamp>.<encodedName>. * where, * <encodedName> is a hex version of the MD5 hash of - * <tablename>,<startkey>,<regionIdTimestamp> + * <tablename>,<endkey>,<regionIdTimestamp> * * The old region name format: - * <tablename>,<startkey>,<regionIdTimestamp> + * <tablename>,<endkey>,<regionIdTimestamp> * For region names in the old format, the encoded name is a 32-bit * JenkinsHash integer value (in its decimal notation, string form). *

@@ -77,7 +80,7 @@ implements WritableComparable { /** Separator used to demarcate the encodedName in a region name * in the new format. See description on new format above. */ - private static final int ENC_SEPARATOR = '.'; + public static final int ENC_SEPARATOR = '.'; public static final int MD5_HEX_LENGTH = 32; /** @@ -91,7 +94,7 @@ implements WritableComparable { if ((regionName.length >= 1) && (regionName[regionName.length - 1] == ENC_SEPARATOR)) { // region name is new format. it contains the encoded name. - return true; + return true; } return false; } @@ -104,7 +107,7 @@ implements WritableComparable { String encodedName; if (hasEncodedName(regionName)) { // region is in new format: - // ,,/encodedName/ + // ,,/encodedName/ encodedName = Bytes.toString(regionName, regionName.length - MD5_HEX_LENGTH - 1, MD5_HEX_LENGTH); @@ -137,6 +140,11 @@ implements WritableComparable { /** delimiter used between portions of a region name */ public static final int DELIMITER = ','; + // It should say, the tablename encoded in the region ends with 0x01, + // but the last region's tablename ends with 0x02 + public static final int END_OF_TABLE_NAME = 1; + public static final int END_OF_TABLE_NAME_FOR_EMPTY_ENDKEY = END_OF_TABLE_NAME + 1; + /** HRegionInfo for root region */ public static final HRegionInfo ROOT_REGIONINFO = new HRegionInfo(0L, Bytes.toBytes("-ROOT-")); @@ -272,8 +280,7 @@ implements WritableComparable { this.offLine = false; this.regionId = regionid; - this.regionName = createRegionName(this.tableName, startKey, regionId, true); - + this.regionName = createRegionName(this.tableName, startKey, endKey, Long.toString(regionId).getBytes(), true); this.regionNameStr = Bytes.toStringBinary(this.regionName); this.split = split; this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone(); @@ -305,113 +312,170 @@ implements WritableComparable { /** * Make a region name of passed parameters. 
+ * * @param tableName - * @param startKey Can be null - * @param regionid Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). - * @return Region name made of passed tableName, startKey and id + * @param endKey Can be null + * @param regionid Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its encoded name?). + * @return Region name made of passed tableName, endKey and id */ - public static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final long regionid, boolean newFormat) { - return createRegionName(tableName, startKey, Long.toString(regionid), newFormat); + public static byte[] createRegionName(final byte[] tableName, + final byte[] endKey, + final long regionid, + boolean newFormat) { + return createRegionName(tableName, null, endKey, Long.toString(regionid).getBytes(), newFormat); } - /** - * Make a region name of passed parameters. - * @param tableName - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). - * @return Region name made of passed tableName, startKey and id - */ - public static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final String id, boolean newFormat) { - return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat); - } /** * Make a region name of passed parameters. + * * @param tableName - * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). 
- * @return Region name made of passed tableName, startKey and id - */ - public static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final byte [] id, boolean newFormat) { - byte [] b = new byte [tableName.length + 2 + id.length + - (startKey == null? 0: startKey.length) + - (newFormat ? (MD5_HEX_LENGTH + 2) : 0)]; - - int offset = tableName.length; - System.arraycopy(tableName, 0, b, 0, offset); - b[offset++] = DELIMITER; - if (startKey != null && startKey.length > 0) { - System.arraycopy(startKey, 0, b, offset, startKey.length); - offset += startKey.length; + * @param startKey Can be null + * @param endKey Can be null + * @param id Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its encoded name?). + * @return Region name made of passed tableName, endKey and id + */ + + public static byte [] createRegionName(final byte[] tableName, + byte[] startKey, + final byte[] endKey, + final byte[] id, + boolean newFormat){ + // We need to be able to add a single byte easily to the regionName as we are building it up + // It's being used by appending delimiters and special markers. + + byte [] oneByte = new byte[1]; + //This cannot return any weird string chars as all uuid chars are a hex char. + + byte[] uuidTableName = UUID.nameUUIDFromBytes(tableName).toString().getBytes(); + int allocation = uuidTableName == null ? 2 : uuidTableName.length + 2; + allocation += endKey == null ? 1 : endKey.length + 1; + allocation += id == null ? 
0 : id.length; + + ByteBuffer byteArrayDataOutput = MappedByteBuffer.allocate(allocation); + byteArrayDataOutput.put(uuidTableName); + + if (endKey == null || endKey.length <= 0) { + oneByte[0] = END_OF_TABLE_NAME_FOR_EMPTY_ENDKEY; + byteArrayDataOutput.put(oneByte); + + oneByte[0] = DELIMITER; + byteArrayDataOutput.put(oneByte); + } else { + oneByte[0] = END_OF_TABLE_NAME; + byteArrayDataOutput.put(oneByte); + + oneByte[0] = DELIMITER; + byteArrayDataOutput.put(oneByte); + + byteArrayDataOutput.put(endKey); + } + byteArrayDataOutput.put(oneByte); + + if (id != null && id.length > 0) { + byteArrayDataOutput.put(id); } - b[offset++] = DELIMITER; - System.arraycopy(id, 0, b, offset, id.length); - offset += id.length; if (newFormat) { + return addEncoding(byteArrayDataOutput.array(), startKey); + } else { + return byteArrayDataOutput.array(); + } + } + + private static byte [] addEncoding(final byte[] metaKey, + final byte[] startKey) { + + // // Encoded name should be built into the region name. // - // Use the region name thus far (namely, ,,) + // Use the region name thus far (namely, ,,) // to compute a MD5 hash to be used as the encoded name, and append // it to the byte buffer. // - String md5Hash = MD5Hash.getMD5AsHex(b, 0, offset); - byte [] md5HashBytes = Bytes.toBytes(md5Hash); + + byte[] oneByte = new byte[1]; + byte[] md5HashBytes; + oneByte[0] = DELIMITER; + if (startKey == null || startKey.length < 1) { + md5HashBytes = MD5Hash.getMD5AsHex(metaKey).getBytes(); + } else { + md5HashBytes = MD5Hash.getMD5AsHex(Bytes.add(metaKey, + oneByte, + startKey)).getBytes(); + } if (md5HashBytes.length != MD5_HEX_LENGTH) { LOG.error("MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH + - "; Got=" + md5HashBytes.length); + "; Got=" + md5HashBytes.length); } - // now append the bytes '..' 
to the end - b[offset++] = ENC_SEPARATOR; - System.arraycopy(md5HashBytes, 0, b, offset, MD5_HEX_LENGTH); - offset += MD5_HEX_LENGTH; - b[offset++] = ENC_SEPARATOR; - } - - return b; + oneByte[0] = ENC_SEPARATOR; + byte [] encoding = Bytes.add(oneByte, md5HashBytes, oneByte); + return Bytes.add(metaKey, encoding); } + /** - * Gets the table name from the specified region name. - * @param regionName - * @return Table name. + * Get the tableName associated with that region. It only supports + * user regions, not the meta one. + * @param regionName The actually HBase regionName + * @return The Table name. */ - public static byte [] getTableName(byte [] regionName) { - int offset = -1; - for (int i = 0; i < regionName.length; i++) { - if (regionName[i] == DELIMITER) { - offset = i; - break; - } + @Deprecated + public static byte [] getTableName(byte[] regionName, Configuration conf) + throws IOException, + InterruptedException { + CatalogTracker catalogTracker = new CatalogTracker(conf); + catalogTracker.start(); + Pair region = MetaReader.getRegion(catalogTracker, + regionName); + catalogTracker.stop(); + if (region == null ){ + return null; } - byte [] tableName = new byte[offset]; - System.arraycopy(regionName, 0, tableName, 0, offset); - return tableName; + return region.getFirst().getTableName(); } /** + * Get the tableName associated with that region. It only supports + * user regions, not the meta one. + * @param regionName The actually HBase regionName + * @return The Table name. 
+ */ + @Deprecated + public static byte [] getTableName(byte[] regionName) throws IOException, + InterruptedException { + Configuration c = HBaseConfiguration.create(); + return getTableName(regionName, c); + } + + private static boolean isLHSSplit(final byte[] regionName, + final int offset) throws IOException { + + byte last = regionName[offset]; + if (last == END_OF_TABLE_NAME || last == END_OF_TABLE_NAME_FOR_EMPTY_ENDKEY) { + return true; + } else if (DELIMITER == last) { + throw new IOException("Old Meta format"); + } + return false; + } + /** * Separate elements of a regionName. * @param regionName - * @return Array of byte[] containing tableName, startKey and id + * @return Array of byte[] containing tableNameUUID, endKey and id * @throws IOException */ public static byte [][] parseRegionName(final byte [] regionName) throws IOException { int offset = -1; + for (int i = 0; i < regionName.length; i++) { - if (regionName[i] == DELIMITER) { + if (isLHSSplit(regionName, offset)) { offset = i; break; } @@ -427,10 +491,10 @@ implements WritableComparable { } } if(offset == -1) throw new IOException("Invalid regionName format"); - byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; + byte [] endKey = HConstants.EMPTY_BYTE_ARRAY; if(offset != tableName.length + 1) { - startKey = new byte[offset - tableName.length - 1]; - System.arraycopy(regionName, tableName.length + 1, startKey, 0, + endKey = new byte[offset - tableName.length - 1]; + System.arraycopy(regionName, tableName.length + 1, endKey, 0, offset - tableName.length - 1); } byte [] id = new byte[regionName.length - offset - 1]; @@ -438,7 +502,7 @@ implements WritableComparable { regionName.length - offset - 1); byte [][] elements = new byte[3][]; elements[0] = tableName; - elements[1] = startKey; + elements[1] = endKey; elements[2] = id; return elements; } @@ -497,12 +561,12 @@ implements WritableComparable { } /** - * Get current table name of the region + * Get the tablename of the region from the regionKey * 
@return byte array of table name */ public byte[] getTableName() { if (tableName == null || tableName.length == 0) { - tableName = getTableName(getRegionName()); + tableName = getRegionName(); } return tableName; } diff --git src/main/java/org/apache/hadoop/hbase/KeyValue.java src/main/java/org/apache/hadoop/hbase/KeyValue.java index be7e2d8..5f82f02 100644 --- src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -26,6 +26,7 @@ import java.nio.ByteBuffer; import java.util.Comparator; import java.util.HashMap; import java.util.Map; +import java.util.UUID; import com.google.common.primitives.Longs; import org.apache.commons.logging.Log; @@ -64,6 +65,7 @@ import org.apache.hadoop.io.Writable; * The column does not contain the family/qualifier delimiter, {@link #COLUMN_FAMILY_DELIMITER} */ public class KeyValue implements Writable, HeapSize { + private static String UUIDMetaTableKey = UUID.nameUUIDFromBytes(Bytes.toBytes(".META.")).toString(); static final Log LOG = LogFactory.getLog(KeyValue.class); // TODO: Group Key-only comparators and operations into a Key class, just // for neatness sake, if can figure what to call it. @@ -1896,12 +1898,21 @@ public class KeyValue implements Writable, HeapSize { // Rows look like this: .META.,ROW_FROM_META,RID // LOG.info("ROOT " + Bytes.toString(left, loffset, llength) + // "---" + Bytes.toString(right, roffset, rlength)); - final int metalength = 7; // '.META.' 
length + final int metalength = UUIDMetaTableKey.length() + 2; // uuidof('.META.') + end of table + comma length int lmetaOffsetPlusDelimiter = loffset + metalength; + int rmetaOffsetPlusDelimiter = roffset + metalength; + int result; + + if (rlength > 0 && llength > 0) { + result = Bytes.compareTo(left, lmetaOffsetPlusDelimiter - 2, 1, right, rmetaOffsetPlusDelimiter - 2, 1); + //Compare the end of table marker in root + if (result != 0) { + return result; + } + } int leftFarDelimiter = getDelimiterInReverse(left, lmetaOffsetPlusDelimiter, llength - metalength, HRegionInfo.DELIMITER); - int rmetaOffsetPlusDelimiter = roffset + metalength; int rightFarDelimiter = getDelimiterInReverse(right, rmetaOffsetPlusDelimiter, rlength - metalength, HRegionInfo.DELIMITER); @@ -1913,7 +1924,7 @@ public class KeyValue implements Writable, HeapSize { } else if (leftFarDelimiter < 0 && rightFarDelimiter < 0) { return 0; } - int result = super.compareRows(left, lmetaOffsetPlusDelimiter, + result = super.compareRows(left, lmetaOffsetPlusDelimiter, leftFarDelimiter - lmetaOffsetPlusDelimiter, right, rmetaOffsetPlusDelimiter, rightFarDelimiter - rmetaOffsetPlusDelimiter); diff --git src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java index e5e60a8..4a8471a 100644 --- src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java +++ src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java @@ -32,11 +32,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.*; import 
org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; @@ -55,7 +51,8 @@ public class MetaReader { static final byte [] META_REGION_PREFIX; static { // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX. - // FIRST_META_REGIONINFO == '.META.,,1'. META_REGION_PREFIX == '.META.,' + // FIRST_META_REGIONINFO == '.UUIDoF(META)\002.,,1'. + // META_REGION_PREFIX == 'UUIDoF(.META.),' int len = HRegionInfo.FIRST_META_REGIONINFO.getRegionName().length - 2; META_REGION_PREFIX = new byte [len]; System.arraycopy(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), 0, @@ -443,7 +440,8 @@ public class MetaReader { this.results.add(this.current); } }; - fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableNameBytes)); + fullScan(catalogTracker, visitor, + MetaSearchRow.getStartRow(tableName.getBytes(), HConstants.EMPTY_BYTE_ARRAY)); // If visitor has results >= 1 then table exists. return visitor.getResults().size() >= 1; } @@ -502,18 +500,7 @@ public class MetaReader { return Bytes.equals(tableName, current.getTableName()); } - /** - * @param tableName - * @return Place to start Scan in .META. 
when passed a - * tableName; returns <tableName&rt; <,&rt; <,&rt; - */ - static byte [] getTableStartRowForMeta(final byte [] tableName) { - byte [] startRow = new byte[tableName.length + 2]; - System.arraycopy(tableName, 0, startRow, 0, tableName.length); - startRow[startRow.length - 2] = HRegionInfo.DELIMITER; - startRow[startRow.length - 1] = HRegionInfo.DELIMITER; - return startRow; - } + /** * This method creates a Scan object that will only scan catalog rows that @@ -527,12 +514,11 @@ public class MetaReader { public static Scan getScanForTableName(byte[] tableName) { String strName = Bytes.toString(tableName); // Start key is just the table name with delimiters - byte[] startKey = Bytes.toBytes(strName + ",,"); + byte[] startKey = MetaSearchRow.getStartRow(tableName, HConstants.EMPTY_BYTE_ARRAY); // Stop key appends the smallest possible char to the table name - byte[] stopKey = Bytes.toBytes(strName + " ,,"); + byte[] stopKey = MetaSearchRow.getStopRow(tableName); - Scan scan = new Scan(startKey); - scan.setStopRow(stopKey); + Scan scan = new Scan(startKey, stopKey); return scan; } @@ -597,7 +583,7 @@ public class MetaReader { this.results.add(this.current); } }; - fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableName), + fullScan(catalogTracker, visitor, MetaSearchRow.getStartRow(tableName, HConstants.EMPTY_BYTE_ARRAY), Bytes.equals(tableName, HConstants.META_TABLE_NAME)); return visitor.getResults(); } diff --git src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 6bff130..834ed5f 100644 --- src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -482,7 +482,7 @@ public class HBaseAdmin implements Abortable, Closeable { public void deleteTable(final byte [] tableName) throws IOException { isMasterRunning(); HTableDescriptor.isLegalTableName(tableName); - HRegionLocation firstMetaServer = 
getFirstMetaServerForTable(tableName); + HRegionLocation firstMetaServer = getLastMetaServerForTable(tableName); try { getMaster().deleteTable(tableName); } catch (RemoteException e) { @@ -1507,10 +1507,10 @@ public class HBaseAdmin implements Abortable, Closeable { return getMaster().getClusterStatus(); } - private HRegionLocation getFirstMetaServerForTable(final byte [] tableName) + private HRegionLocation getLastMetaServerForTable(final byte[] tableName) throws IOException { return connection.locateRegion(HConstants.META_TABLE_NAME, - HRegionInfo.createRegionName(tableName, null, HConstants.NINES, false)); + HRegionInfo.createRegionName(tableName, null, null, HConstants.NINES.getBytes(), false)); } /** diff --git src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index d475a1d..8282424 100644 --- src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -821,8 +821,13 @@ public class HConnectionManager { return null; } } else if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) { - return locateRegionInMeta(HConstants.ROOT_TABLE_NAME, tableName, row, - useCache, metaRegionLock); + + //HARD CODED TO POINT TO THE FIRST META TABLE + return locateRegionInMeta(HConstants.ROOT_TABLE_NAME, + HConstants.META_TABLE_NAME, + HConstants.EMPTY_BYTE_ARRAY, + useCache, + metaRegionLock); } else { // Region not in the cache - have to go to the meta RS return locateRegionInMeta(HConstants.META_TABLE_NAME, tableName, row, @@ -890,9 +895,51 @@ public class HConnectionManager { } } + + private HRegionInfo resultToHRegionInfo(final Result result, + byte[] tableName, + byte[] parentTable) + throws IOException { + + if (result == null) { + throw new TableNotFoundException(Bytes.toString(tableName)); + } + + byte[] value = result.getValue(HConstants.CATALOG_FAMILY, + HConstants.REGIONINFO_QUALIFIER); + + if 
(value == null || value.length == 0) { + throw new IOException("HRegionInfo was null or empty in " + + Bytes.toString(parentTable) + + ", row=" + + result); + } + + HRegionInfo regionInfo = Writables.getHRegionInfo(value); + + // possible we got a region of a different table... + + if (!Bytes.equals(regionInfo.getTableName(), tableName)) { + String errorMsg = "Table '" + Bytes.toString(tableName) + "' was not found, got: "; + if (regionInfo != null) { + errorMsg += Bytes.toString(regionInfo.getTableName()) + "."; + } + throw new TableNotFoundException(errorMsg); + } + + return regionInfo; + } + private boolean finishedScanningForRegion(HRegionInfo regionInfo) { + if (regionInfo == null || regionInfo.isOffline() || regionInfo.isSplit()) { + return false; + } + return true; + } + /* * Search one of the meta tables (-ROOT- or .META.) for the HRegionLocation - * info that contains the table and row we're seeking. + * info that contains the table and row we're seeking. If the row is null or 0 length + * then return a key which scans to the first meta key for the table. */ private HRegionLocation locateRegionInMeta(final byte [] parentTable, final byte [] tableName, final byte [] row, boolean useCache, @@ -908,17 +955,13 @@ public class HConnectionManager { } } - // build the key of the meta region we should be looking for. - // the extra 9's on the end are necessary to allow "exact" matches - // without knowing the precise region names. 
- byte [] metaKey = HRegionInfo.createRegionName(tableName, row, - HConstants.NINES, false); + final byte [] metaKey = MetaSearchRow.getStartRow(tableName, row); for (int tries = 0; true; tries++) { if (tries >= numRetries) { throw new NoServerForRegionException("Unable to find region for " + Bytes.toStringBinary(row) + " after " + numRetries + " tries."); } - + Result regionInfoRow = null; HRegionLocation metaLocation = null; try { // locate the root or meta region @@ -928,10 +971,11 @@ public class HConnectionManager { HRegionInterface server = getHRegionConnection(metaLocation.getHostname(), metaLocation.getPort()); - Result regionInfoRow = null; // This block guards against two threads trying to load the meta // region at the same time. The first will load the meta region and // the second will use the value that the first one found. + + HRegionInfo regionInfo = null; synchronized (regionLockObject) { // If the parent table is META, we may want to pre-fetch some // region info into the global region cache for this table. @@ -953,46 +997,61 @@ public class HConnectionManager { deleteCachedLocation(tableName, row); } - // Query the root or meta region for the location of the meta region - regionInfoRow = server.getClosestRowBefore( - metaLocation.getRegionInfo().getRegionName(), metaKey, - HConstants.CATALOG_FAMILY); - } - if (regionInfoRow == null) { - throw new TableNotFoundException(Bytes.toString(tableName)); - } - byte [] value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER); - if (value == null || value.length == 0) { - throw new IOException("HRegionInfo was null or empty in " + - Bytes.toString(parentTable) + ", row=" + regionInfoRow); - } - // convert the row result into the HRegionLocation we need! - HRegionInfo regionInfo = (HRegionInfo) Writables.getWritable( - value, new HRegionInfo()); - // possible we got a region of a different table... 
- if (!Bytes.equals(regionInfo.getTableName(), tableName)) { - throw new TableNotFoundException( - "Table '" + Bytes.toString(tableName) + "' was not found, got: " + - Bytes.toString(regionInfo.getTableName()) + "."); + byte[] stopRow = MetaSearchRow.getStopRow(tableName); + Scan scan = new Scan(metaKey, stopRow).addFamily(HConstants.CATALOG_FAMILY); + long scannerId = server.openScanner(metaLocation.getRegionInfo().getRegionName(), scan); + + // We always try to get two rows just in case one of them is a split. + Result[] result = server.next(scannerId, 2); + + // We haven't cleared the meta entry out of the table yet + if (result == null || result.length <= 0 ) { + throw new TableNotFoundException("Table '" + Bytes.toString(tableName) + + " we searched for the StartKey: " + Bytes.toString(metaKey) + + " startKey lastChar's int value: " + (int) metaKey[metaKey.length -3] + + " with the stopKey: " + Bytes.toString(stopRow) + + " stopRow lastChar's int value: " + (int) stopRow[stopRow.length -3] + + " with parentTable:" + Bytes.toString(parentTable)); + } else if (result.length == 2) { + regionInfoRow = result[0]; + regionInfo = resultToHRegionInfo(regionInfoRow, + tableName, + parentTable); + if (regionInfo.isOffline()) { + regionInfoRow = result[1]; + regionInfo = resultToHRegionInfo(regionInfoRow, + tableName, + parentTable); + } + } else { + regionInfoRow = result[0]; + regionInfo = resultToHRegionInfo(regionInfoRow, tableName, parentTable); + } + } + + if (regionInfo == null) { + throw new TableNotFoundException("Table '" + + Bytes.toString(tableName) + "' was not found, got: " + + Bytes.toString(regionInfo.getTableName()) + "."); } + if (regionInfo.isSplit()) { throw new RegionOfflineException("the only available region for" + - " the required row is a split parent," + - " the daughters should be online soon: " + - regionInfo.getRegionNameAsString()); + " the required row is a split parent," + + " the daughters should be online soon: " + + 
regionInfo.getRegionNameAsString()); } if (regionInfo.isOffline()) { throw new RegionOfflineException("the region is offline, could" + - " be caused by a disable table call: " + - regionInfo.getRegionNameAsString()); + " be caused by a disable table call: " + + regionInfo.getRegionNameAsString()); } - value = regionInfoRow.getValue(HConstants.CATALOG_FAMILY, - HConstants.SERVER_QUALIFIER); String hostAndPort = ""; - if (value != null) { - hostAndPort = Bytes.toString(value); + if (regionInfoRow.getValue(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER) != null) { + hostAndPort = Bytes.toString(regionInfoRow.getValue(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER)); } if (hostAndPort.equals("")) { throw new NoServerForRegionException("No server address listed " + diff --git src/main/java/org/apache/hadoop/hbase/client/HTable.java src/main/java/org/apache/hadoop/hbase/client/HTable.java index 8cc6444..1dbf418 100644 --- src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -212,6 +212,7 @@ public class HTable implements HTableInterface, Closeable { this.finishSetup(); } + /** * setup this HTable's parameter based on the passed configuration * @param conf diff --git src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java index 4135e55..6e82db4 100644 --- src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -23,21 +23,16 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Map; import java.util.NavigableMap; import java.util.TreeMap; -import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import 
org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable; -import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Writables; @@ -142,39 +137,17 @@ public class MetaScanner { // if row is not null, we want to use the startKey of the row's region as // the startRow for the meta scan. - byte[] startRow; - if (row != null) { - // Scan starting at a particular row in a particular table - assert tableName != null; - byte[] searchRow = - HRegionInfo.createRegionName(tableName, row, HConstants.NINES, - false); - HTable metaTable = new HTable(configuration, HConstants.META_TABLE_NAME); - Result startRowResult = metaTable.getRowOrBefore(searchRow, - HConstants.CATALOG_FAMILY); - if (startRowResult == null) { - throw new TableNotFoundException("Cannot find row in .META. 
for table: " - + Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow)); - } - byte[] value = startRowResult.getValue(HConstants.CATALOG_FAMILY, - HConstants.REGIONINFO_QUALIFIER); - if (value == null || value.length == 0) { - throw new IOException("HRegionInfo was null or empty in Meta for " + - Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow)); - } - HRegionInfo regionInfo = Writables.getHRegionInfo(value); + byte[] startRow; + byte[] stopRow; - byte[] rowBefore = regionInfo.getStartKey(); - startRow = HRegionInfo.createRegionName(tableName, rowBefore, - HConstants.ZEROES, false); - } else if (tableName == null || tableName.length == 0) { + if (tableName == null || tableName.length == 0) { // Full META scan startRow = HConstants.EMPTY_START_ROW; + stopRow = null; } else { - // Scan META for an entire table - startRow = HRegionInfo.createRegionName( - tableName, HConstants.EMPTY_START_ROW, HConstants.ZEROES, false); + startRow = MetaSearchRow.getStartRow(tableName, row); + stopRow = MetaSearchRow.getStopRow(tableName); } // Scan over each meta region @@ -182,8 +155,15 @@ public class MetaScanner { int rows = Math.min(rowLimit, configuration.getInt( HConstants.HBASE_META_SCANNER_CACHING, HConstants.DEFAULT_HBASE_META_SCANNER_CACHING)); + do { - final Scan scan = new Scan(startRow).addFamily(HConstants.CATALOG_FAMILY); + Scan scan; + if (stopRow != null) { // Support full meta scans + scan = new Scan(startRow, stopRow).addFamily(HConstants.CATALOG_FAMILY); + } else { + scan = new Scan(startRow).addFamily(HConstants.CATALOG_FAMILY); + } + if (LOG.isDebugEnabled()) { LOG.debug("Scanning " + Bytes.toString(metaTableName) + " starting at row=" + Bytes.toStringBinary(startRow) + " for max=" + diff --git src/main/java/org/apache/hadoop/hbase/client/MetaSearchRow.java src/main/java/org/apache/hadoop/hbase/client/MetaSearchRow.java new file mode 100644 index 0000000..624a032 --- /dev/null +++ 
/**
 * Copyright 2011 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;

/**
 * Helper for building scan boundaries (start and stop rows) when searching
 * .META. for the regions of a single table.
 *
 * <p>Meta row keys begin with the UUID form of the table name
 * ({@link UUID#nameUUIDFromBytes(byte[])}), so both boundaries are built
 * from that UUID prefix followed by a marker byte, two delimiters, and a
 * run of digits ({@code HConstants.ZEROES} / {@code HConstants.NINES}).
 */
public class MetaSearchRow {
  static final Log LOG = LogFactory.getLog(MetaSearchRow.class);

  /**
   * Bytes between the UUID table-name prefix and the trailing digit run:
   * one end-of-table marker byte plus two delimiter bytes.
   */
  private static final int SEPARATOR_LENGTH = 3;

  /**
   * Builds a stop row for scanning .META. for a particular table, so that
   * the scan does not run past the last region belonging to that table.
   *
   * @param tableName the table whose meta rows are being scanned
   * @return a row key that sorts after every meta row of {@code tableName}
   */
  public static byte[] getStopRow(final byte[] tableName) {
    // Hoisted into locals: the UUID string and NINES bytes were each
    // recomputed up to three times in the original.
    final byte[] uuidTableName = UUID.nameUUIDFromBytes(tableName).toString().getBytes();
    final byte[] nines = HConstants.NINES.getBytes();
    final byte[] b = new byte[uuidTableName.length + SEPARATOR_LENGTH + nines.length];
    int offset = uuidTableName.length;
    System.arraycopy(uuidTableName, 0, b, 0, offset);
    // One past the empty-endkey marker so the stop row sorts after every
    // row of this table, including its (empty-endkey) last region.
    b[offset++] = (byte) (HRegionInfo.END_OF_TABLE_NAME_FOR_EMPTY_ENDKEY + 1);
    b[offset++] = HRegionInfo.DELIMITER;
    b[offset++] = HRegionInfo.DELIMITER;
    System.arraycopy(nines, 0, b, offset, nines.length);
    return b;
  }

  /**
   * Builds the first possible meta row key for the region of
   * {@code tableName} that could contain {@code searchRow}.
   *
   * @param tableName the table being searched
   * @param searchRow the row whose hosting region is wanted; null or empty
   *                  means "start from the table's first region in meta"
   * @return the start row key for the meta scan
   */
  public static byte[] getStartRow(final byte[] tableName, final byte[] searchRow) {
    final byte[] uuidTableName = UUID.nameUUIDFromBytes(tableName).toString().getBytes();

    if (searchRow == null || searchRow.length == 0) {
      // First region in META for this table.
      final byte[] zeroes = HConstants.ZEROES.getBytes();
      final byte[] startRow = new byte[uuidTableName.length + SEPARATOR_LENGTH + zeroes.length];
      int offset = uuidTableName.length;
      System.arraycopy(uuidTableName, 0, startRow, 0, offset);
      // (byte) cast added: byte-int arithmetic yields int and the original
      // statement did not compile without it (the parallel statement in
      // getStopRow already carries the cast).
      // NOTE(review): getStopRow uses END_OF_TABLE_NAME_FOR_EMPTY_ENDKEY + 1
      // while this uses END_OF_TABLE_NAME - 1 -- confirm the asymmetry is
      // intended.
      startRow[offset++] = (byte) (HRegionInfo.END_OF_TABLE_NAME - 1);
      startRow[offset++] = HRegionInfo.DELIMITER;
      startRow[offset++] = HRegionInfo.DELIMITER;
      System.arraycopy(zeroes, 0, startRow, offset, zeroes.length);
      return startRow;
    }

    // createRegionName handles UUID-encoding the table name itself.
    return HRegionInfo.createRegionName(tableName,
        null,
        searchRow,
        HConstants.NINES.getBytes(),
        false);
  }
}
src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java index bf85bc1..be5db4c 100644 --- src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java +++ src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java @@ -21,8 +21,8 @@ package org.apache.hadoop.hbase.rest; import java.io.IOException; -import java.net.InetSocketAddress; import java.util.Map; +import java.util.NavigableMap; import javax.ws.rs.GET; import javax.ws.rs.Produces; @@ -37,7 +37,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.HTableInterface; @@ -67,12 +67,12 @@ public class RegionsResource extends ResourceBase { this.tableResource = tableResource; } - private Map getTableRegions() + private NavigableMap getTableRegions() throws IOException { HTablePool pool = servlet.getTablePool(); HTableInterface table = pool.getTable(tableResource.getName()); try { - return ((HTable)table).getRegionsInfo(); + return ((HTable)table).getRegionLocations(); } finally { pool.putTable(table); } @@ -88,15 +88,14 @@ public class RegionsResource extends ResourceBase { try { String tableName = tableResource.getName(); TableInfoModel model = new TableInfoModel(tableName); - Map regions = getTableRegions(); - for (Map.Entry e: regions.entrySet()) { + NavigableMap regions = getTableRegions(); + for (Map.Entry e: regions.entrySet()) { HRegionInfo hri = e.getKey(); - HServerAddress addr = e.getValue(); - InetSocketAddress sa = addr.getInetSocketAddress(); + ServerName addr = e.getValue(); model.add( new TableRegionModel(tableName, hri.getRegionId(), hri.getStartKey(), hri.getEndKey(), - sa.getHostName() + ":" + Integer.valueOf(sa.getPort()))); + addr.getHostname() + ":" + 
Integer.valueOf(addr.getPort()))); } ResponseBuilder response = Response.ok(model); response.cacheControl(cacheControl); diff --git src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java index 67e7a04..17f699f 100644 --- src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java +++ src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java @@ -66,8 +66,7 @@ public class TableRegionModel implements Serializable { * @param startKey the start key of the region * @param endKey the end key of the region */ - public TableRegionModel(String table, long id, byte[] startKey, - byte[] endKey) { + public TableRegionModel(String table, long id, byte[] startKey, byte[] endKey) { this(table, id, startKey, endKey, null); } @@ -79,8 +78,7 @@ public class TableRegionModel implements Serializable { * @param endKey the end key of the region * @param location the name and port of the region server hosting the region */ - public TableRegionModel(String table, long id, byte[] startKey, - byte[] endKey, String location) { + public TableRegionModel(String table, long id, byte[] startKey, byte[] endKey, String location) { this.table = table; this.id = id; this.startKey = startKey; @@ -95,9 +93,11 @@ public class TableRegionModel implements Serializable { public String getName() { byte [] tableNameAsBytes = Bytes.toBytes(this.table); byte [] nameAsBytes = HRegionInfo.createRegionName(tableNameAsBytes, - this.startKey, this.id, - !HTableDescriptor.isMetaTable(tableNameAsBytes)); - return Bytes.toString(nameAsBytes); + this.startKey, + this.endKey, + Long.toString(this.id).getBytes(), + !HTableDescriptor.isMetaTable(tableNameAsBytes)); + return Bytes.toStringBinary(nameAsBytes); } /** @@ -109,6 +109,14 @@ public class TableRegionModel implements Serializable { } /** + * @return the table name + */ + @XmlAttribute + public String getTable() { + return table; + } + + /** * @return the 
start key */ @XmlAttribute @@ -133,15 +141,10 @@ public class TableRegionModel implements Serializable { } /** - * @param name region printable name + * @param table the table name */ - public void setName(String name) { - String split[] = name.split(","); - this.table = split[0]; - this.startKey = Bytes.toBytes(split[1]); - String tail = split[2]; - split = tail.split("\\."); - id = Long.valueOf(split[0]); + public void setTable(String table) { + this.table = table; } /** @@ -185,6 +188,8 @@ public class TableRegionModel implements Serializable { sb.append(Bytes.toString(startKey)); sb.append("'\n endKey='"); sb.append(Bytes.toString(endKey)); + sb.append("'\n table='"); + sb.append(table); if (location != null) { sb.append("'\n location='"); sb.append(location); diff --git src/main/java/org/apache/hadoop/hbase/util/Merge.java src/main/java/org/apache/hadoop/hbase/util/Merge.java index 67d0fda..00eb21e 100644 --- src/main/java/org/apache/hadoop/hbase/util/Merge.java +++ src/main/java/org/apache/hadoop/hbase/util/Merge.java @@ -45,6 +45,7 @@ import org.apache.hadoop.util.ToolRunner; import java.io.IOException; import java.util.List; +import java.util.UUID; /** * Utility that can merge any two regions in the same table: adjacent, @@ -364,7 +365,8 @@ public class Merge extends Configured implements Tool { } private boolean notInTable(final byte [] tn, final byte [] rn) { - if (WritableComparator.compareBytes(tn, 0, tn.length, rn, 0, tn.length) != 0) { + byte[] uuidTableName = UUID.nameUUIDFromBytes(tn).toString().getBytes(); + if (WritableComparator.compareBytes(uuidTableName, 0, uuidTableName.length, rn, 0, uuidTableName.length) != 0) { LOG.error("Region " + Bytes.toStringBinary(rn) + " does not belong to table " + Bytes.toString(tn)); return true; diff --git src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 9ea19e5..7a8e649 100644 --- 
src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -47,15 +47,7 @@ import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.io.hfile.Compression; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.master.HMaster; @@ -69,7 +61,6 @@ import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.Keying; import org.apache.hadoop.hbase.util.RegionSplitter; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Writables; @@ -1168,15 +1159,14 @@ public class HBaseTestingUtility { // TODO: Redo using MetaReader. 
HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME); List rows = new ArrayList(); - ResultScanner s = t.getScanner(new Scan()); + byte[] startRow = MetaSearchRow.getStartRow(tableName, HConstants.EMPTY_BYTE_ARRAY); + byte[] stopRow = MetaSearchRow.getStopRow(tableName); + Scan scan = new Scan(startRow,stopRow).addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + ResultScanner s = t.getScanner(scan); for (Result result : s) { - HRegionInfo info = Writables.getHRegionInfo( - result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER)); - if (Bytes.compareTo(info.getTableName(), tableName) == 0) { - LOG.info("getMetaTableRows: row -> " + - Bytes.toStringBinary(result.getRow())); - rows.add(result.getRow()); - } + LOG.info("getMetaTableRows: row -> " + + Bytes.toStringBinary(result.getRow())); + rows.add(result.getRow()); } s.close(); t.close(); diff --git src/test/java/org/apache/hadoop/hbase/TestKeyValue.java src/test/java/org/apache/hadoop/hbase/TestKeyValue.java index dc4ee8d..e672763 100644 --- src/test/java/org/apache/hadoop/hbase/TestKeyValue.java +++ src/test/java/org/apache/hadoop/hbase/TestKeyValue.java @@ -20,16 +20,21 @@ package org.apache.hadoop.hbase; import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; import java.util.Set; import java.util.TreeSet; import junit.framework.TestCase; +import org.apache.commons.collections.iterators.EmptyMapIterator; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValue.MetaComparator; import org.apache.hadoop.hbase.KeyValue.Type; +import org.apache.hadoop.hbase.client.MetaSearchRow; +import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.util.Bytes; import org.junit.experimental.categories.Category; @@ -52,8 +57,8 @@ public class TestKeyValue extends TestCase { 
assertTrue(aaa.matchingColumn(family2,qualifier2)); byte [] nullQualifier = new byte[0]; aaa = new KeyValue(a, family1, nullQualifier, 0L, Type.Put, a); - assertTrue(aaa.matchingColumn(family1,null)); - assertFalse(aaa.matchingColumn(family2,qualifier2)); + assertTrue(aaa.matchingColumn(family1, null)); + assertFalse(aaa.matchingColumn(family2, qualifier2)); } public void testBasics() throws Exception { @@ -75,6 +80,21 @@ public class TestKeyValue extends TestCase { LOG.info(kv.toString()); } + public void testTroublesomeRow() { + long now = System.currentTimeMillis(); + + final byte[] a = HRegionInfo.createRegionName(Bytes.toBytes("foo"), + Bytes.toBytes("foo\u0000"), + 0, + true); + final byte[] b = HRegionInfo.createRegionName(Bytes.toBytes("foo"), + Bytes.toBytes("foo"), + 0, + true); + assertTrue(KeyValue.META_COMPARATOR.compare(new KeyValue(a, now), new KeyValue(b, now)) > 0); + + } + public void testPlainCompare() throws Exception { final byte [] a = Bytes.toBytes("aaa"); final byte [] b = Bytes.toBytes("bbb"); @@ -115,34 +135,70 @@ public class TestKeyValue extends TestCase { public void testMoreComparisons() throws Exception { // Root compares + + byte[] endRow = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + HConstants.EMPTY_BYTE_ARRAY, + 99999999999999l, + false); + + byte[] startRow = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + HConstants.EMPTY_BYTE_ARRAY, + 1l, + false); + long now = System.currentTimeMillis(); - KeyValue a = new KeyValue(Bytes.toBytes(".META.,,99999999999999"), now); - KeyValue b = new KeyValue(Bytes.toBytes(".META.,,1"), now); + KeyValue a = new KeyValue(endRow, now); + KeyValue b = new KeyValue(startRow, now); KVComparator c = new KeyValue.RootComparator(); assertTrue(c.compare(b, a) < 0); - KeyValue aa = new KeyValue(Bytes.toBytes(".META.,,1"), now); - KeyValue bb = new KeyValue(Bytes.toBytes(".META.,,1"), - Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1235943454602L, - (byte[])null); + KeyValue aa = new 
KeyValue(startRow, now); + KeyValue bb = new KeyValue(startRow, + Bytes.toBytes("info"), + Bytes.toBytes("regioninfo"), + 1235943454602L, + (byte[])null); assertTrue(c.compare(aa, bb) < 0); - // Meta compares - KeyValue aaa = new KeyValue( - Bytes.toBytes("TestScanMultipleVersions,row_0500,1236020145502"), now); - KeyValue bbb = new KeyValue( - Bytes.toBytes("TestScanMultipleVersions,,99999999999999"), now); - c = new KeyValue.MetaComparator(); - assertTrue(c.compare(bbb, aaa) < 0); + // Meta compares + byte [] testScanMultipleVersion = Bytes.toBytes("TestScanMultipleVersions"); +// Bytes.toBytes("TestScanMultipleVersions,row_0500,1236020145502"), now); +// Bytes.toBytes("TestScanMultipleVersions,,99999999999999"), now); - KeyValue aaaa = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,,1236023996656"), - Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1236024396271L, - (byte[])null); + byte[] tsmRegion = HRegionInfo.createRegionName(testScanMultipleVersion, Bytes.toBytes("row_0500"), 1236020145502l, false); + startRow = HRegionInfo.createRegionName(testScanMultipleVersion, HConstants.EMPTY_BYTE_ARRAY, 99999999999999l, false); + + KeyValue aaa = new KeyValue(tsmRegion, now); + KeyValue bbb = new KeyValue(startRow, now); + c = new KeyValue.MetaComparator(); + assertTrue(c.compare(bbb, aaa) > 0); + + // Meta compares + + + //Bytes.toBytes("TestScanMultipleVersions"),,1236023996656") + byte[] tscmvRegionEmpty = HRegionInfo.createRegionName(testScanMultipleVersion, + HConstants.EMPTY_BYTE_ARRAY, + 1236023996656l, + true); + + //"TestScanMultipleVersions,row_0500,1236034574162" + byte[] tscmvRegion500 = HRegionInfo.createRegionName(testScanMultipleVersion, + Bytes.toBytes("row_0500"), + 1236023996656l, + true); + KeyValue aaaa = new KeyValue(tscmvRegionEmpty, + Bytes.toBytes("info"), + Bytes.toBytes("regioninfo"), + 1236024396271L, + (byte[])null); assertTrue(c.compare(aaaa, bbb) < 0); - KeyValue x = new 
KeyValue(Bytes.toBytes("TestScanMultipleVersions,row_0500,1236034574162"), - Bytes.toBytes("info"), Bytes.toBytes(""), 9223372036854775807L, - (byte[])null); - KeyValue y = new KeyValue(Bytes.toBytes("TestScanMultipleVersions,row_0500,1236034574162"), + KeyValue x = new KeyValue(tscmvRegion500, + Bytes.toBytes("info"), + Bytes.toBytes(""), + 9223372036854775807L, + (byte[])null); + KeyValue y = new KeyValue(tscmvRegion500, Bytes.toBytes("info"), Bytes.toBytes("regioninfo"), 1236034574912L, (byte[])null); assertTrue(c.compare(x, y) < 0); @@ -187,74 +243,141 @@ public class TestKeyValue extends TestCase { public void testKeyValueBorderCases() throws IOException { // % sorts before , so if we don't do special comparator, rowB would // come before rowA. - KeyValue rowA = new KeyValue(Bytes.toBytes("testtable,www.hbase.org/,1234"), - Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); - KeyValue rowB = new KeyValue(Bytes.toBytes("testtable,www.hbase.org/%20,99999"), - Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); + + byte[] metaRowA = HRegionInfo.createRegionName(Bytes.toBytes("testtable"), + Bytes.toBytes("www.hbase.org/"), + 1234, + false); + byte[] metaRowB = HRegionInfo.createRegionName(Bytes.toBytes("testtable"), + Bytes.toBytes("www.hbase.org/%20"), + 99999, + false); + + + KeyValue rowA = new KeyValue(metaRowA, Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); + KeyValue rowB = new KeyValue(metaRowB, Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) < 0); - rowA = new KeyValue(Bytes.toBytes("testtable,,1234"), Bytes.toBytes("fam"), + + metaRowA = HRegionInfo.createRegionName(Bytes.toBytes("testtable"), + HConstants.EMPTY_BYTE_ARRAY, + 1234, + true); + + metaRowB = HRegionInfo.createRegionName(Bytes.toBytes("testtable"), + Bytes.toBytes("www.hbase.org/%20"), + 99999, + true); + rowA = new KeyValue(metaRowA, 
Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); - rowB = new KeyValue(Bytes.toBytes("testtable,$www.hbase.org/,99999"), + rowB = new KeyValue(metaRowB, Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); - assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) < 0); + assertTrue(KeyValue.META_COMPARATOR.compare(rowA, rowB) > 0); + + metaRowA = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + Bytes.toBytes("testtable,www.hbase.org/,1234"), + 1234, + true); - rowA = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/,1234,4321"), + metaRowB = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + Bytes.toBytes("testtable,www.hbase.org/%20,99999"), + 99999, + true); + + rowA = new KeyValue(metaRowA, Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); - rowB = new KeyValue(Bytes.toBytes(".META.,testtable,www.hbase.org/%20,99999,99999"), + rowB = new KeyValue(metaRowB, Bytes.toBytes("fam"), Bytes.toBytes(""), Long.MAX_VALUE, (byte[])null); assertTrue(KeyValue.ROOT_COMPARATOR.compare(rowA, rowB) < 0); } private void metacomparisons(final KeyValue.MetaComparator c) { long now = System.currentTimeMillis(); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now), - new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now)) == 0); - KeyValue a = new KeyValue(Bytes.toBytes(".META.,a,,0,1"), now); - KeyValue b = new KeyValue(Bytes.toBytes(".META.,a,,0,2"), now); + + byte[] metaRowA = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + Bytes.toBytes("a,,0"), + 1, + true); + + byte[] metaRowB = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + Bytes.toBytes("a,,0"), + 2, + true); + + + assertTrue(c.compare(new KeyValue(metaRowA, now), + new KeyValue(metaRowA, now)) == 0); + KeyValue a = new KeyValue(metaRowA, now); + KeyValue b = new KeyValue(metaRowB, now); assertTrue(c.compare(a, b) < 0); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,a,,0,2"), now), - new 
KeyValue(Bytes.toBytes(".META.,a,,0,1"), now)) > 0); + assertTrue(c.compare(new KeyValue(metaRowB, now), + new KeyValue(metaRowA, now)) > 0); } private void comparisons(final KeyValue.KVComparator c) { long now = System.currentTimeMillis(); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,1"), now), - new KeyValue(Bytes.toBytes(".META.,,1"), now)) == 0); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,1"), now), - new KeyValue(Bytes.toBytes(".META.,,2"), now)) < 0); - assertTrue(c.compare(new KeyValue(Bytes.toBytes(".META.,,2"), now), - new KeyValue(Bytes.toBytes(".META.,,1"), now)) > 0); + byte[] metaRowA = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + HConstants.EMPTY_BYTE_ARRAY, + 1, + true); + byte[] metaRowB = HRegionInfo.createRegionName(Bytes.toBytes(".META."), + HConstants.EMPTY_BYTE_ARRAY, + 2, + true); + + assertTrue(c.compare(new KeyValue(metaRowA, now), + new KeyValue(metaRowA, now)) == 0); + assertTrue(c.compare(new KeyValue(metaRowA, now), + new KeyValue(metaRowB, now)) < 0); + assertTrue(c.compare(new KeyValue(metaRowB, now), + new KeyValue(metaRowA, now)) > 0); } public void testBinaryKeys() throws Exception { - Set set = new TreeSet(KeyValue.COMPARATOR); + Set set; final byte [] fam = Bytes.toBytes("col"); final byte [] qf = Bytes.toBytes("umn"); final byte [] nb = new byte[0]; - KeyValue [] keys = {new KeyValue(Bytes.toBytes("aaaaa,\u0000\u0000,2"), fam, qf, 2, nb), - new KeyValue(Bytes.toBytes("aaaaa,\u0001,3"), fam, qf, 3, nb), - new KeyValue(Bytes.toBytes("aaaaa,,1"), fam, qf, 1, nb), - new KeyValue(Bytes.toBytes("aaaaa,\u1000,5"), fam, qf, 5, nb), - new KeyValue(Bytes.toBytes("aaaaa,a,4"), fam, qf, 4, nb), - new KeyValue(Bytes.toBytes("a,a,0"), fam, qf, 0, nb), + + byte[] metaRow0 = HRegionInfo.createRegionName(Bytes.toBytes("a"), + Bytes.toBytes("a"), + 0, + false); + + byte[] metaRow1 = HRegionInfo.createRegionName(Bytes.toBytes("aaaaa"), + Bytes.toBytes("\u0000\u0000"), + 1, + false); + + byte[] metaRow2 = 
HRegionInfo.createRegionName(Bytes.toBytes("aaaaa"), + Bytes.toBytes(","), + 2, + false); + + byte[] metaRow3 = HRegionInfo.createRegionName(Bytes.toBytes("aaaaa"), + Bytes.toBytes("a"), + 3, + false); + + byte[] metaRow4 = HRegionInfo.createRegionName(Bytes.toBytes("aaaaa"), + Bytes.toBytes("\u1000"), + 4, + false); + + byte[] metaRow5 = HRegionInfo.createRegionName(Bytes.toBytes("aaaaa"), + HConstants.EMPTY_BYTE_ARRAY, + 5, + false); + + KeyValue [] keys = { + new KeyValue(metaRow2, fam, qf, 2, nb), + new KeyValue(metaRow3, fam, qf, 3, nb), + new KeyValue(metaRow1, fam, qf, 1, nb), + new KeyValue(metaRow5, fam, qf, 5, nb), + new KeyValue(metaRow4, fam, qf, 4, nb), + new KeyValue(metaRow0, fam, qf, 0, nb), }; - // Add to set with bad comparator - for (int i = 0; i < keys.length; i++) { - set.add(keys[i]); - } - // This will output the keys incorrectly. - boolean assertion = false; - int count = 0; - try { - for (KeyValue k: set) { - assertTrue(count++ == k.getTimestamp()); - } - } catch (junit.framework.AssertionFailedError e) { - // Expected - assertion = true; - } - assertTrue(assertion); + int count; // Make set with good comparator set = new TreeSet(new KeyValue.MetaComparator()); for (int i = 0; i < keys.length; i++) { @@ -265,21 +388,51 @@ public class TestKeyValue extends TestCase { assertTrue(count++ == k.getTimestamp()); } // Make up -ROOT- table keys. 
- KeyValue [] rootKeys = { - new KeyValue(Bytes.toBytes(".META.,aaaaa,\u0000\u0000,0,2"), fam, qf, 2, nb), - new KeyValue(Bytes.toBytes(".META.,aaaaa,\u0001,0,3"), fam, qf, 3, nb), - new KeyValue(Bytes.toBytes(".META.,aaaaa,,0,1"), fam, qf, 1, nb), - new KeyValue(Bytes.toBytes(".META.,aaaaa,\u1000,0,5"), fam, qf, 5, nb), - new KeyValue(Bytes.toBytes(".META.,aaaaa,a,0,4"), fam, qf, 4, nb), - new KeyValue(Bytes.toBytes(".META.,,0"), fam, qf, 0, nb), - }; + byte[] metaTable = Bytes.toBytes(".META"); + metaRow1 = HRegionInfo.createRegionName(metaTable, + Bytes.toBytes("aaaaa,\u0000\u0000,0"), + 1, + true); + + metaRow2 = HRegionInfo.createRegionName(metaTable, + Bytes.toBytes("aaaaa,\u0001,0"), + 2, + true); + metaRow0 = HRegionInfo.createRegionName(metaTable, + Bytes.toBytes("aaaaa,,0"), + 0, + true); + + metaRow4 = HRegionInfo.createRegionName(metaTable, + Bytes.toBytes("aaaaa,\u1000,0"), + 4, + true); + + metaRow3 = HRegionInfo.createRegionName(metaTable, + Bytes.toBytes("aaaaa,a,0"), + 3, + true); + metaRow5 = HRegionInfo.createRegionName(metaTable, + HConstants.EMPTY_BYTE_ARRAY, + 5, + true); + + + KeyValue[] rootKeys = { + new KeyValue(metaRow2, fam, qf, 2, nb), + new KeyValue(metaRow3, fam, qf, 3, nb), + new KeyValue(metaRow1, fam, qf, 1, nb), + new KeyValue(metaRow5, fam, qf, 5, nb), + new KeyValue(metaRow4, fam, qf, 4, nb), + new KeyValue(metaRow0, fam, qf, 0, nb), + }; // This will output the keys incorrectly. 
set = new TreeSet(new KeyValue.MetaComparator()); // Add to set with bad comparator for (int i = 0; i < keys.length; i++) { set.add(rootKeys[i]); } - assertion = false; + boolean assertion = false; count = 0; try { for (KeyValue k: set) { diff --git src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index 9f66880..8ba0a6a 100644 --- src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -26,13 +26,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -1167,13 +1161,13 @@ public class TestAdmin { public void testCloseRegionIfInvalidRegionNameIsPassed() throws Exception { byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegion1"); createTableWithDefaultConf(TABLENAME); - + String encodedTableName = UUID.nameUUIDFromBytes(TABLENAME).toString(); HRegionInfo info = null; HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME); List onlineRegions = rs.getOnlineRegions(); for (HRegionInfo regionInfo : onlineRegions) { if (!regionInfo.isMetaTable()) { - if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion1")) { + if (regionInfo.getRegionNameAsString().contains(encodedTableName)) { info = regionInfo; admin.closeRegionWithEncodedRegionName("sample", rs.getServerName() .getServerName()); @@ -1189,16 +1183,16 @@ public class TestAdmin { public void testCloseRegionThatFetchesTheHRIFromMeta() throws Exception { byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegion2"); createTableWithDefaultConf(TABLENAME); + String encodedTableName = 
UUID.nameUUIDFromBytes(TABLENAME).toString(); HRegionInfo info = null; HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME); List onlineRegions = rs.getOnlineRegions(); for (HRegionInfo regionInfo : onlineRegions) { if (!regionInfo.isMetaTable()) { - - if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion2")) { + if (regionInfo.getRegionNameAsString().contains(encodedTableName)) { info = regionInfo; - admin.closeRegion(regionInfo.getRegionNameAsString(), rs + admin.closeRegion(regionInfo.getRegionName(), rs .getServerName().getServerName()); } } @@ -1219,7 +1213,7 @@ public class TestAdmin { public void testCloseRegionWhenServerNameIsNull() throws Exception { byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegion3"); createTableWithDefaultConf(TABLENAME); - + String encodedTableName = UUID.nameUUIDFromBytes(TABLENAME).toString(); HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME); try { @@ -1227,7 +1221,7 @@ public class TestAdmin { for (HRegionInfo regionInfo : onlineRegions) { if (!regionInfo.isMetaTable()) { if (regionInfo.getRegionNameAsString() - .contains("TestHBACloseRegion3")) { + .contains(encodedTableName)) { admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(), null); } @@ -1243,7 +1237,7 @@ public class TestAdmin { public void testCloseRegionWhenServerNameIsEmpty() throws Exception { byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegionWhenServerNameIsEmpty"); createTableWithDefaultConf(TABLENAME); - + String encodedTableName = UUID.nameUUIDFromBytes(TABLENAME).toString(); HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME); try { @@ -1251,7 +1245,7 @@ public class TestAdmin { for (HRegionInfo regionInfo : onlineRegions) { if (!regionInfo.isMetaTable()) { if (regionInfo.getRegionNameAsString() - .contains("TestHBACloseRegionWhenServerNameIsEmpty")) { + .contains(encodedTableName)) { admin.closeRegionWithEncodedRegionName(regionInfo.getEncodedName(), " "); } @@ -1266,6 +1260,7 @@ 
public class TestAdmin { public void testCloseRegionWhenEncodedRegionNameIsNotGiven() throws Exception { byte[] TABLENAME = Bytes.toBytes("TestHBACloseRegion4"); createTableWithDefaultConf(TABLENAME); + String encodedTableName = UUID.nameUUIDFromBytes(TABLENAME).toString(); HRegionInfo info = null; HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLENAME); @@ -1273,7 +1268,7 @@ public class TestAdmin { List onlineRegions = rs.getOnlineRegions(); for (HRegionInfo regionInfo : onlineRegions) { if (!regionInfo.isMetaTable()) { - if (regionInfo.getRegionNameAsString().contains("TestHBACloseRegion4")) { + if (regionInfo.getRegionNameAsString().contains(encodedTableName)) { info = regionInfo; admin.closeRegionWithEncodedRegionName(regionInfo .getRegionNameAsString(), rs.getServerName().getServerName()); diff --git src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java index ff9c502..20f8915 100644 --- src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java +++ src/test/java/org/apache/hadoop/hbase/coprocessor/SampleRegionWALObserver.java @@ -87,7 +87,7 @@ implements WALObserver { HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException { boolean bypass = false; // check table name matches or not. 
- if (!Arrays.equals(HRegionInfo.getTableName(info.getRegionName()), this.tableName)) { + if (!Arrays.equals(info.getTableName(), this.tableName)) { return bypass; } preWALWriteCalled = true; diff --git src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java index 368a0e5..4bc66c0 100644 --- src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java +++ src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java @@ -55,12 +55,24 @@ public class TestClassLoading { private static MiniDFSCluster cluster; static final int BUFFER_SIZE = 4096; - static final String tableName = "TestClassLoading"; - static final String cpName1 = "TestCP1"; - static final String cpName2 = "TestCP2"; - static final String cpName3 = "TestCP3"; - static final String cpName4 = "TestCP4"; - static final String cpName5 = "TestCP5"; + static final String TABLE_NAME = "TestClassLoading"; + static final String UUID_TABLE_NAME = UUID.nameUUIDFromBytes(TABLE_NAME.getBytes()).toString(); + + static final String CP_NAME_1 = "TestCP1"; + static final String UUID_CP_NAME_1 = UUID.nameUUIDFromBytes(CP_NAME_1.getBytes()).toString(); + + static final String CP_NAME_2 = "TestCP2"; + static final String UUID_CP_NAME_2 = UUID.nameUUIDFromBytes(CP_NAME_2.getBytes()).toString(); + + static final String CP_NAME_3 = "TestCP3"; + static final String UUID_CP_NAME_3 = UUID.nameUUIDFromBytes(CP_NAME_3.getBytes()).toString(); + + static final String CP_NAME_4 = "TestCP4"; + static final String UUID_CP_NAME_4 = UUID.nameUUIDFromBytes(CP_NAME_4.getBytes()).toString(); + + static final String CP_NAME_5 = "TestCP5"; + static final String UUID_CP_NAME_5 = UUID.nameUUIDFromBytes(CP_NAME_5.getBytes()).toString(); + private static Class regionCoprocessor1 = ColumnAggregationEndpoint.class; private static Class regionCoprocessor2 = GenericEndpoint.class; @@ -200,8 +212,8 @@ public class TestClassLoading { public void 
testClassLoadingFromHDFS() throws Exception { FileSystem fs = cluster.getFileSystem(); - File jarFile1 = buildCoprocessorJar(cpName1); - File jarFile2 = buildCoprocessorJar(cpName2); + File jarFile1 = buildCoprocessorJar(CP_NAME_1); + File jarFile2 = buildCoprocessorJar(CP_NAME_2); // copy the jars into dfs fs.copyFromLocalFile(new Path(jarFile1.getPath()), @@ -221,18 +233,18 @@ public class TestClassLoading { LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS2); // create a table that references the coprocessors - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TABLE_NAME); htd.addFamily(new HColumnDescriptor("test")); // without configuration values - htd.setValue("COPROCESSOR$1", jarFileOnHDFS1.toString() + "|" + cpName1 + + htd.setValue("COPROCESSOR$1", jarFileOnHDFS1.toString() + "|" + CP_NAME_1 + "|" + Coprocessor.PRIORITY_USER); // with configuration values - htd.setValue("COPROCESSOR$2", jarFileOnHDFS2.toString() + "|" + cpName2 + + htd.setValue("COPROCESSOR$2", jarFileOnHDFS2.toString() + "|" + CP_NAME_2 + "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); HBaseAdmin admin = new HBaseAdmin(this.conf); - if (admin.tableExists(tableName)) { - admin.disableTable(tableName); - admin.deleteTable(tableName); + if (admin.tableExists(TABLE_NAME)) { + admin.disableTable(TABLE_NAME); + admin.deleteTable(TABLE_NAME); } admin.createTable(htd); @@ -242,13 +254,13 @@ public class TestClassLoading { MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { - if (region.getRegionNameAsString().startsWith(tableName)) { + if (region.getRegionNameAsString().startsWith(UUID_TABLE_NAME)) { CoprocessorEnvironment env; - env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1); + env = region.getCoprocessorHost().findCoprocessorEnvironment(CP_NAME_1); if (env != null) { found1 = true; } - env = 
region.getCoprocessorHost().findCoprocessorEnvironment(cpName2); + env = region.getCoprocessorHost().findCoprocessorEnvironment(CP_NAME_2); if (env != null) { found2 = true; Configuration conf = env.getConfiguration(); @@ -258,8 +270,8 @@ public class TestClassLoading { } } } - assertTrue("Class " + cpName1 + " was missing on a region", found1); - assertTrue("Class " + cpName2 + " was missing on a region", found2); + assertTrue("Class " + CP_NAME_1 + " was missing on a region", found1); + assertTrue("Class " + CP_NAME_2 + " was missing on a region", found2); assertTrue("Configuration key 'k1' was missing on a region", found2_k1); assertTrue("Configuration key 'k2' was missing on a region", found2_k2); assertTrue("Configuration key 'k3' was missing on a region", found2_k3); @@ -268,12 +280,12 @@ public class TestClassLoading { @Test // HBASE-3516: Test CP Class loading from local file system public void testClassLoadingFromLocalFS() throws Exception { - File jarFile = buildCoprocessorJar(cpName3); + File jarFile = buildCoprocessorJar(CP_NAME_3); // create a table that references the jar - HTableDescriptor htd = new HTableDescriptor(cpName3); + HTableDescriptor htd = new HTableDescriptor(CP_NAME_3); htd.addFamily(new HColumnDescriptor("test")); - htd.setValue("COPROCESSOR$1", jarFile.toString() + "|" + cpName3 + "|" + + htd.setValue("COPROCESSOR$1", jarFile.toString() + "|" + CP_NAME_3 + "|" + Coprocessor.PRIORITY_USER); HBaseAdmin admin = new HBaseAdmin(this.conf); admin.createTable(htd); @@ -283,11 +295,11 @@ public class TestClassLoading { MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { - if (region.getRegionNameAsString().startsWith(cpName3)) { - found = (region.getCoprocessorHost().findCoprocessor(cpName3) != null); + if (region.getRegionNameAsString().startsWith(UUID_CP_NAME_3)) { + found = (region.getCoprocessorHost().findCoprocessor(CP_NAME_3) != null); } } - 
assertTrue("Class " + cpName3 + " was missing on a region", found); + assertTrue("Class " + CP_NAME_3 + " was missing on a region", found); } @Test @@ -296,24 +308,24 @@ public class TestClassLoading { public void testHBase3810() throws Exception { // allowed value pattern: [path] | class name | [priority] | [key values] - File jarFile1 = buildCoprocessorJar(cpName1); - File jarFile2 = buildCoprocessorJar(cpName2); - File jarFile4 = buildCoprocessorJar(cpName4); - File jarFile5 = buildCoprocessorJar(cpName5); + File jarFile1 = buildCoprocessorJar(CP_NAME_1); + File jarFile2 = buildCoprocessorJar(CP_NAME_2); + File jarFile4 = buildCoprocessorJar(CP_NAME_4); + File jarFile5 = buildCoprocessorJar(CP_NAME_5); String cpKey1 = "COPROCESSOR$1"; String cpKey2 = " Coprocessor$2 "; String cpKey3 = " coprocessor$03 "; - String cpValue1 = jarFile1.toString() + "|" + cpName1 + "|" + + String cpValue1 = jarFile1.toString() + "|" + CP_NAME_1 + "|" + Coprocessor.PRIORITY_USER; - String cpValue2 = jarFile2.toString() + " | " + cpName2 + " | "; + String cpValue2 = jarFile2.toString() + " | " + CP_NAME_2 + " | "; // load from default class loader String cpValue3 = " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v "; // create a table that references the jar - HTableDescriptor htd = new HTableDescriptor(tableName); + HTableDescriptor htd = new HTableDescriptor(TABLE_NAME); htd.addFamily(new HColumnDescriptor("test")); // add 3 coprocessors by setting htd attributes directly. 
@@ -322,19 +334,19 @@ public class TestClassLoading { htd.setValue(cpKey3, cpValue3); // add 2 coprocessor by using new htd.addCoprocessor() api - htd.addCoprocessor(cpName4, new Path(jarFile4.getPath()), + htd.addCoprocessor(CP_NAME_4, new Path(jarFile4.getPath()), Coprocessor.PRIORITY_USER, null); Map kvs = new HashMap(); kvs.put("k1", "v1"); kvs.put("k2", "v2"); kvs.put("k3", "v3"); - htd.addCoprocessor(cpName5, new Path(jarFile5.getPath()), + htd.addCoprocessor(CP_NAME_5, new Path(jarFile5.getPath()), Coprocessor.PRIORITY_USER, kvs); HBaseAdmin admin = new HBaseAdmin(this.conf); - if (admin.tableExists(tableName)) { - admin.disableTable(tableName); - admin.deleteTable(tableName); + if (admin.tableExists(TABLE_NAME)) { + admin.disableTable(TABLE_NAME); + admin.deleteTable(TABLE_NAME); } admin.createTable(htd); @@ -347,19 +359,19 @@ public class TestClassLoading { MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { - if (region.getRegionNameAsString().startsWith(tableName)) { + if (region.getRegionNameAsString().startsWith(UUID_TABLE_NAME)) { found_1 = found_1 || - (region.getCoprocessorHost().findCoprocessor(cpName1) != null); + (region.getCoprocessorHost().findCoprocessor(CP_NAME_1) != null); found_2 = found_2 || - (region.getCoprocessorHost().findCoprocessor(cpName2) != null); + (region.getCoprocessorHost().findCoprocessor(CP_NAME_2) != null); found_3 = found_3 || (region.getCoprocessorHost().findCoprocessor("SimpleRegionObserver") != null); found_4 = found_4 || - (region.getCoprocessorHost().findCoprocessor(cpName4) != null); + (region.getCoprocessorHost().findCoprocessor(CP_NAME_4) != null); CoprocessorEnvironment env = - region.getCoprocessorHost().findCoprocessorEnvironment(cpName5); + region.getCoprocessorHost().findCoprocessorEnvironment(CP_NAME_5); if (env != null) { found_5 = true; Configuration conf = env.getConfiguration(); @@ -370,11 +382,11 @@ public class 
TestClassLoading { } } - assertTrue("Class " + cpName1 + " was missing on a region", found_1); - assertTrue("Class " + cpName2 + " was missing on a region", found_2); + assertTrue("Class " + CP_NAME_1 + " was missing on a region", found_1); + assertTrue("Class " + CP_NAME_2 + " was missing on a region", found_2); assertTrue("Class SimpleRegionObserver was missing on a region", found_3); - assertTrue("Class " + cpName4 + " was missing on a region", found_4); - assertTrue("Class " + cpName5 + " was missing on a region", found_5); + assertTrue("Class " + CP_NAME_4 + " was missing on a region", found_4); + assertTrue("Class " + CP_NAME_5 + " was missing on a region", found_5); assertTrue("Configuration key 'k1' was missing on a region", found5_k1); assertTrue("Configuration key 'k2' was missing on a region", found5_k2); @@ -477,7 +489,7 @@ public class TestClassLoading { for(Map.Entry region: server.getValue().getRegionsLoad().entrySet()) { if (region.getValue().getNameAsString().equals(tableName)) { - // this server server hosts a region of tableName: add this server.. + // this server hosts a region of TABLE_NAME: add this server.. serverLoadHashMap.put(server.getKey(),server.getValue()); // .. and skip the rest of the regions that it hosts. break; @@ -494,7 +506,7 @@ public class TestClassLoading { boolean success = false; for(int i = 0; i < 5; i++) { if (tableName == null) { - //if no tableName specified, use all servers. + //if no TABLE_NAME specified, use all servers. servers = TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager().
getOnlineServers(); diff --git src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java index 36dd289..55a280d 100644 --- src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java +++ src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java @@ -136,8 +136,6 @@ public class TestWALObserver { HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(TEST_TABLE)); final HTableDescriptor htd = createBasic3FamilyHTD(Bytes.toString(TEST_TABLE)); - HRegion region2 = HRegion.createHRegion(hri, - hbaseRootDir, this.conf, htd); Path basedir = new Path(this.hbaseRootDir, Bytes.toString(TEST_TABLE)); deleteDir(basedir); @@ -174,11 +172,9 @@ public class TestWALObserver { for (KeyValue kv : kvs) { if (Arrays.equals(kv.getFamily(), TEST_FAMILY[0])) { foundFamily0 = true; - } - if (Arrays.equals(kv.getFamily(), TEST_FAMILY[2])) { + } else if (Arrays.equals(kv.getFamily(), TEST_FAMILY[2])) { foundFamily2 = true; - } - if (Arrays.equals(kv.getFamily(), TEST_FAMILY[1])) { + } else if (Arrays.equals(kv.getFamily(), TEST_FAMILY[1])) { if (!Arrays.equals(kv.getValue(), TEST_VALUE[1])) { modifiedFamily1 = true; } @@ -199,11 +195,9 @@ public class TestWALObserver { for (KeyValue kv : kvs) { if (Arrays.equals(kv.getFamily(), TEST_FAMILY[0])) { foundFamily0 = true; - } - if (Arrays.equals(kv.getFamily(), TEST_FAMILY[2])) { + } else if (Arrays.equals(kv.getFamily(), TEST_FAMILY[2])) { foundFamily2 = true; - } - if (Arrays.equals(kv.getFamily(), TEST_FAMILY[1])) { + } else if (Arrays.equals(kv.getFamily(), TEST_FAMILY[1])) { if (!Arrays.equals(kv.getValue(), TEST_VALUE[1])) { modifiedFamily1 = true; } diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index 5f97167..f0f88a5 100644 --- 
src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -55,88 +55,6 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { private static final byte[] T35 = Bytes.toBytes("035"); private static final byte[] T40 = Bytes.toBytes("040"); - - - public void testUsingMetaAndBinary() throws IOException { - FileSystem filesystem = FileSystem.get(conf); - Path rootdir = testDir; - // Up flush size else we bind up when we use default catalog flush of 16k. - HTableDescriptor.META_TABLEDESC.setMemStoreFlushSize(64 * 1024 * 1024); - - HRegion mr = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, - rootdir, this.conf, HTableDescriptor.META_TABLEDESC); - try { - // Write rows for three tables 'A', 'B', and 'C'. - for (char c = 'A'; c < 'D'; c++) { - HTableDescriptor htd = new HTableDescriptor("" + c); - final int last = 128; - final int interval = 2; - for (int i = 0; i <= last; i += interval) { - HRegionInfo hri = new HRegionInfo(htd.getName(), - i == 0? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i), - i == last? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i + interval)); - Put put = new Put(hri.getRegionName()); - put.setWriteToWAL(false); - put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, - Writables.getBytes(hri)); - mr.put(put, false); - } - } - InternalScanner s = mr.getScanner(new Scan()); - try { - List keys = new ArrayList(); - while(s.next(keys)) { - LOG.info(keys); - keys.clear(); - } - } finally { - s.close(); - } - findRow(mr, 'C', 44, 44); - findRow(mr, 'C', 45, 44); - findRow(mr, 'C', 46, 46); - findRow(mr, 'C', 43, 42); - mr.flushcache(); - findRow(mr, 'C', 44, 44); - findRow(mr, 'C', 45, 44); - findRow(mr, 'C', 46, 46); - findRow(mr, 'C', 43, 42); - // Now delete 'C' and make sure I don't get entries from 'B'. 
- byte [] firstRowInC = HRegionInfo.createRegionName(Bytes.toBytes("" + 'C'), - HConstants.EMPTY_BYTE_ARRAY, HConstants.ZEROES, false); - Scan scan = new Scan(firstRowInC); - s = mr.getScanner(scan); - try { - List keys = new ArrayList(); - while (s.next(keys)) { - mr.delete(new Delete(keys.get(0).getRow()), null, false); - keys.clear(); - } - } finally { - s.close(); - } - // Assert we get null back (pass -1). - findRow(mr, 'C', 44, -1); - findRow(mr, 'C', 45, -1); - findRow(mr, 'C', 46, -1); - findRow(mr, 'C', 43, -1); - mr.flushcache(); - findRow(mr, 'C', 44, -1); - findRow(mr, 'C', 45, -1); - findRow(mr, 'C', 46, -1); - findRow(mr, 'C', 43, -1); - } finally { - if (mr != null) { - try { - mr.close(); - } catch (Exception e) { - e.printStackTrace(); - } - mr.getLog().closeAndDelete(); - } - } - } - /* * @param mr * @param table @@ -152,7 +70,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { // Find the row. byte [] tofindBytes = Bytes.toBytes((short)rowToFind); byte [] metaKey = HRegionInfo.createRegionName(tableb, tofindBytes, - HConstants.NINES, false); + 99999999999999l, false); LOG.info("find=" + new String(metaKey)); Result r = mr.getClosestRowBefore(metaKey); if (answer == -1) { diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java index 6e1211b..350897a 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java @@ -26,11 +26,11 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; +import java.util.UUID; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSTableDescriptors; -import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.MD5Hash; import org.junit.Test; import 
org.junit.experimental.categories.Category; @@ -40,24 +40,28 @@ public class TestHRegionInfo { @Test public void testCreateHRegionInfoName() throws Exception { String tableName = "tablename"; + String encodedTable = UUID.nameUUIDFromBytes(tableName.getBytes()).toString(); final byte [] tn = Bytes.toBytes(tableName); - String startKey = "startkey"; - final byte [] sk = Bytes.toBytes(startKey); + String endKey = "endkey"; + final byte [] ek = Bytes.toBytes(endKey); String id = "id"; // old format region name - byte [] name = HRegionInfo.createRegionName(tn, sk, id, false); + byte [] name = HRegionInfo.createRegionName(tn, null, ek, id.getBytes(), false); String nameStr = Bytes.toString(name); - assertEquals(tableName + "," + startKey + "," + id, nameStr); + assertEquals(encodedTable + (char)HRegionInfo.END_OF_TABLE_NAME + (char)HRegionInfo.DELIMITER + + endKey + (char)HRegionInfo.DELIMITER + id, nameStr); // new format region name. String md5HashInHex = MD5Hash.getMD5AsHex(name); assertEquals(HRegionInfo.MD5_HEX_LENGTH, md5HashInHex.length()); - name = HRegionInfo.createRegionName(tn, sk, id, true); + name = HRegionInfo.createRegionName(tn, null, ek, id.getBytes(), true); nameStr = Bytes.toString(name); - assertEquals(tableName + "," + startKey + "," - + id + "." 
+ md5HashInHex + ".", + assertEquals(encodedTable + (char)HRegionInfo.END_OF_TABLE_NAME + + (char)HRegionInfo.DELIMITER + endKey + + (char)HRegionInfo.DELIMITER + id + + (char)HRegionInfo.ENC_SEPARATOR + md5HashInHex + (char)HRegionInfo.ENC_SEPARATOR, nameStr); } diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfoGetTableName.java src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfoGetTableName.java new file mode 100644 index 0000000..747bcd8 --- /dev/null +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfoGetTableName.java @@ -0,0 +1,74 @@ +/** + * Copyright 2007 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.MetaSearchRow; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.MD5Hash; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mortbay.log.Log; + +import java.io.IOException; +import java.util.UUID; + +import static org.junit.Assert.*; + +@Category(LargeTests.class) +public class TestHRegionInfoGetTableName { + private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static String TEST_TABLE_NAME = "testHregionInfo"; + @BeforeClass + public static void setupBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(); + HTableDescriptor hTableDescriptor = new HTableDescriptor(TEST_TABLE_NAME); + TEST_UTIL.getHBaseAdmin().createTable(hTableDescriptor); + } + + @AfterClass + public static void tearDownAfterClass() throws IOException { + TEST_UTIL.getHBaseAdmin().disableTable(TEST_TABLE_NAME); + TEST_UTIL.getHBaseAdmin().deleteTable(TEST_TABLE_NAME); + } + + @Test + public void testCreateHRegionInfoName() throws Exception { + + HTable hTable = new HTable(TEST_UTIL.getConfiguration(), + HConstants.META_TABLE_NAME); + + byte[] startRow = MetaSearchRow.getStartRow(TEST_TABLE_NAME.getBytes(), + null); + Scan scan = new Scan(startRow); + Result result = hTable.getScanner(scan).next(); + byte[] tableName = HRegionInfo.getTableName(result.getRow(), + TEST_UTIL.getConfiguration()); + assertEquals(Bytes.toString(tableName), + Bytes.toString(TEST_TABLE_NAME.getBytes())); + } +} + diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java 
src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java index a092cf0..049a499 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java @@ -493,37 +493,6 @@ public class TestMemStore extends TestCase { m.kvset.size(), m.kvset.size() == 3); } - public void testBinary() throws IOException { - MemStore mc = new MemStore(new Configuration(), KeyValue.ROOT_COMPARATOR); - final int start = 43; - final int end = 46; - for (int k = start; k <= end; k++) { - byte [] kk = Bytes.toBytes(k); - byte [] row = - Bytes.toBytes(".META.,table," + Bytes.toString(kk) + ",1," + k); - KeyValue key = new KeyValue(row, CONTENTS, BASIC, - System.currentTimeMillis(), - (CONTENTSTR + k).getBytes(HConstants.UTF8_ENCODING)); - mc.add(key); - System.out.println(key); -// key = new KeyValue(row, Bytes.toBytes(ANCHORNUM + k), -// System.currentTimeMillis(), -// (ANCHORSTR + k).getBytes(HConstants.UTF8_ENCODING)); -// mc.add(key); -// System.out.println(key); - } - int index = start; - for (KeyValue kv: mc.kvset) { - System.out.println(kv); - byte [] b = kv.getRow(); - // Hardcoded offsets into String - String str = Bytes.toString(b, 13, 4); - byte [] bb = Bytes.toBytes(index); - String bbStr = Bytes.toString(bb); - assertEquals(str, bbStr); - index++; - } - } ////////////////////////////////////////////////////////////////////////////// // Get tests diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 1997abd..3946a35 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -198,9 +198,9 @@ public class TestSplitTransactionOnCluster { hri, new ServerName("any.old.server", 1234, -1)); // Now try splitting.... should fail. 
And each should successfully // rollback. - this.admin.split(hri.getRegionNameAsString()); - this.admin.split(hri.getRegionNameAsString()); - this.admin.split(hri.getRegionNameAsString()); + this.admin.split(hri.getRegionName()); + this.admin.split(hri.getRegionName()); + this.admin.split(hri.getRegionName()); // Wait around a while and assert count of regions remains constant. for (int i = 0; i < 10; i++) { Thread.sleep(100); @@ -356,7 +356,7 @@ public class TestSplitTransactionOnCluster { private void split(final HRegionInfo hri, final HRegionServer server, final int regionCount) throws IOException, InterruptedException { - this.admin.split(hri.getRegionNameAsString()); + this.admin.split(hri.getRegionName()); while (server.getOnlineRegions().size() <= regionCount) { LOG.debug("Waiting on region to split"); Thread.sleep(100); diff --git src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java index cffdcb6..108c9a1 100644 --- src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java +++ src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java @@ -27,6 +27,8 @@ import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.rest.client.Client; import org.apache.hadoop.hbase.rest.client.Cluster; @@ -42,8 +44,19 @@ import org.junit.experimental.categories.Category; @Category(MediumTests.class) public class TestStatusResource { - private static final byte[] ROOT_REGION_NAME = Bytes.toBytes("-ROOT-,,0"); - private static final byte[] META_REGION_NAME = Bytes.toBytes(".META.,,1"); + + private static final byte[] ROOT_REGION_NAME = HRegionInfo.createRegionName(HConstants.ROOT_TABLE_NAME, + HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, + 
"0".getBytes(), + false); + + private static final byte[] META_REGION_NAME = HRegionInfo.createRegionName(HConstants.META_TABLE_NAME, + HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, + "1".getBytes(), + false); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static final HBaseRESTTestingUtility REST_TEST_UTIL = diff --git src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java index b6f0ab5..754c04b 100644 --- src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java +++ src/test/java/org/apache/hadoop/hbase/rest/model/TestTableRegionModel.java @@ -45,7 +45,8 @@ public class TestTableRegionModel extends TestCase { " endKey=\"enp5eng=\"" + " startKey=\"YWJyYWNhZGJyYQ==\"" + " id=\"8731042424\"" + - " name=\"testtable,abracadbra,8731042424\"/>"; + " table=\"testtable\"" + + " name=\"testtable,zzyzx,8731042424\"/>"; private JAXBContext context; @@ -55,9 +56,7 @@ public class TestTableRegionModel extends TestCase { } private TableRegionModel buildTestModel() { - TableRegionModel model = - new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION); - return model; + return new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION); } @SuppressWarnings("unused") @@ -77,9 +76,12 @@ public class TestTableRegionModel extends TestCase { assertTrue(Bytes.equals(model.getEndKey(), END_KEY)); assertEquals(model.getId(), ID); assertEquals(model.getLocation(), LOCATION); - assertEquals(model.getName(), - TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID) + - ".ad9860f031282c46ed431d7af8f94aca."); + byte[] regionInfo = HRegionInfo.createRegionName(TABLE.getBytes(), + START_KEY, + END_KEY, + Long.toString(ID).getBytes(), + true); + assertEquals(model.getName(), Bytes.toStringBinary(regionInfo)); } public void testBuildModel() throws Exception { @@ -90,17 +92,13 @@ public class TestTableRegionModel 
extends TestCase { TableRegionModel model = buildTestModel(); String modelName = model.getName(); HRegionInfo hri = new HRegionInfo(Bytes.toBytes(TABLE), - START_KEY, END_KEY, false, ID); + START_KEY, + END_KEY, + false, + ID); assertEquals(modelName, hri.getRegionNameAsString()); } - public void testSetName() { - TableRegionModel model = buildTestModel(); - String name = model.getName(); - model.setName(name); - assertEquals(name, model.getName()); - } - public void testFromXML() throws Exception { checkModel(fromXML(AS_XML)); } diff --git src/test/ruby/hbase/admin_test.rb src/test/ruby/hbase/admin_test.rb index 0c2672b..8b97a0f 100644 --- src/test/ruby/hbase/admin_test.rb +++ src/test/ruby/hbase/admin_test.rb @@ -64,6 +64,8 @@ module Hbase # Create table test table name @create_test_name = 'hbase_create_table_test_table' + @uuid_create_test_name = '724e3234-bc78-3e72-b859-3b113f2ec3ee\x02,,' + end define_test "list should return a list of tables" do @@ -166,7 +168,7 @@ module Hbase admin.drop(@create_test_name) end admin.create(@create_test_name, 'foo') - admin.close_region(@create_test_name + ',,0', nil) + admin.close_region(@uuid_create_test_name, nil) end #-------------------------------------------------------------------------------