diff --git a/src/java/org/apache/hadoop/hbase/HMsg.java b/src/java/org/apache/hadoop/hbase/HMsg.java
index 11c7a3d..ec14665 100644
--- a/src/java/org/apache/hadoop/hbase/HMsg.java
+++ b/src/java/org/apache/hadoop/hbase/HMsg.java
@@ -191,7 +191,7 @@ public class HMsg implements Writable {
     // If null or empty region, don't bother printing it out.
     if (this.info != null && this.info.getRegionName().length > 0) {
       sb.append(": ");
-      sb.append(this.info.toString());
+      sb.append(this.info.getRegionNameAsString());
     }
     if (this.message != null && this.message.length > 0) {
       sb.append(": " + Bytes.toString(this.message));
diff --git a/src/java/org/apache/hadoop/hbase/HRegionInfo.java b/src/java/org/apache/hadoop/hbase/HRegionInfo.java
index 11bae89..5c0c4cf 100644
--- a/src/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/src/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -90,7 +90,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
     this.regionId = regionId;
     this.tableDesc = tableDesc;
     this.regionName = createRegionName(tableDesc.getName(), null, regionId);
-    this.regionNameStr = Bytes.toString(this.regionName);
+    this.regionNameStr = Bytes.toStringBinary(this.regionName);
     setHashCode();
   }
@@ -151,7 +151,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
     this.offLine = false;
     this.regionId = regionid;
     this.regionName = createRegionName(tableDesc.getName(), startKey, regionId);
-    this.regionNameStr = Bytes.toString(this.regionName);
+    this.regionNameStr = Bytes.toStringBinary(this.regionName);
     this.split = split;
     this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone();
     this.startKey = startKey == null?
@@ -171,7 +171,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
     this.offLine = other.isOffline();
     this.regionId = other.getRegionId();
     this.regionName = other.getRegionName();
-    this.regionNameStr = Bytes.toString(this.regionName);
+    this.regionNameStr = Bytes.toStringBinary(this.regionName);
     this.split = other.isSplit();
     this.startKey = other.getStartKey();
     this.tableDesc = other.getTableDesc();
@@ -361,8 +361,8 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
     return "REGION => {" + HConstants.NAME + " => '" +
       this.regionNameStr +
       "', STARTKEY => '" +
-      Bytes.toString(this.startKey) + "', ENDKEY => '" +
-      Bytes.toString(this.endKey) +
+      Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" +
+      Bytes.toStringBinary(this.endKey) +
       "', ENCODED => " + getEncodedName() + "," +
       (isOffline()? " OFFLINE => true,": "") +
       (isSplit()? " SPLIT => true,": "") +
" SPLIT => true,": "") + @@ -424,7 +424,7 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable this.offLine = in.readBoolean(); this.regionId = in.readLong(); this.regionName = Bytes.readByteArray(in); - this.regionNameStr = Bytes.toString(this.regionName); + this.regionNameStr = Bytes.toStringBinary(this.regionName); this.split = in.readBoolean(); this.startKey = Bytes.readByteArray(in); this.tableDesc.readFields(in); diff --git a/src/java/org/apache/hadoop/hbase/HStoreKey.java b/src/java/org/apache/hadoop/hbase/HStoreKey.java index 88b9415..5068431 100644 --- a/src/java/org/apache/hadoop/hbase/HStoreKey.java +++ b/src/java/org/apache/hadoop/hbase/HStoreKey.java @@ -456,7 +456,7 @@ public class HStoreKey implements WritableComparable, HeapSize { return getDelimiter(b, 0, b.length, COLUMN_FAMILY_DELIMITER); } - private static int getRequiredDelimiterInReverse(final byte [] b, + static int getRequiredDelimiterInReverse(final byte [] b, final int offset, final int length, final int delimiter) { int index = getDelimiterInReverse(b, offset, length, delimiter); if (index < 0) { @@ -471,7 +471,7 @@ public class HStoreKey implements WritableComparable, HeapSize { * @return Index of delimiter having started from end of b moving * leftward. */ - private static int getDelimiter(final byte [] b, int offset, final int length, + static int getDelimiter(final byte [] b, int offset, final int length, final int delimiter) { if (b == null) { throw new NullPointerException(); diff --git a/src/java/org/apache/hadoop/hbase/KeyValue.java b/src/java/org/apache/hadoop/hbase/KeyValue.java index 4532ee9..6e987e3 100644 --- a/src/java/org/apache/hadoop/hbase/KeyValue.java +++ b/src/java/org/apache/hadoop/hbase/KeyValue.java @@ -620,6 +620,7 @@ public class KeyValue implements Writable, HeapSize { } /** + * Use for logging. * @param b Key portion of a KeyValue. * @param o Offset to start of key * @param l Length of key. @@ -627,7 +628,7 @@ public class KeyValue implements Writable, HeapSize { */ public static String keyToString(final byte [] b, final int o, final int l) { int rowlength = Bytes.toShort(b, o); - String row = Bytes.toString(b, o + Bytes.SIZEOF_SHORT, rowlength); + String row = Bytes.toStringBinary(b, o + Bytes.SIZEOF_SHORT, rowlength); int columnoffset = o + Bytes.SIZEOF_SHORT + 1 + rowlength; int familylength = b[columnoffset - 1]; int columnlength = l - ((columnoffset - o) + TIMESTAMP_TYPE_SIZE); @@ -699,6 +700,10 @@ public class KeyValue implements Writable, HeapSize { return this.offset + ROW_OFFSET; } + public String getKeyString() { + return Bytes.toStringBinary(getBuffer(), getKeyOffset(), getKeyLength()); + } + /** * @return Length of key portion. */ diff --git a/src/java/org/apache/hadoop/hbase/RegionHistorian.java b/src/java/org/apache/hadoop/hbase/RegionHistorian.java index 2a1bdfa..8817fcc 100644 --- a/src/java/org/apache/hadoop/hbase/RegionHistorian.java +++ b/src/java/org/apache/hadoop/hbase/RegionHistorian.java @@ -103,7 +103,7 @@ public class RegionHistorian implements HConstants { * Region name as a string * @return List of RegionHistoryInformation or null if we're offline. 
    */
-  public List getRegionHistory(String regionName) {
+  public List getRegionHistory(byte [] regionName) {
     if (!isOnline()) {
       return null;
     }
diff --git a/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
index a7d6093..af8319f 100644
--- a/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ b/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
@@ -540,7 +540,7 @@ public class HConnectionManager implements HConstants {
       for (int tries = 0; true; tries++) {
         if (tries >= numRetries) {
           throw new NoServerForRegionException("Unable to find region for "
-            + Bytes.toString(row) + " after " + numRetries + " tries.");
+            + Bytes.toStringBinary(row) + " after " + numRetries + " tries.");
         }

         try {
diff --git a/src/java/org/apache/hadoop/hbase/io/HeapSize.java b/src/java/org/apache/hadoop/hbase/io/HeapSize.java
index 9133907..7eb72cf 100644
--- a/src/java/org/apache/hadoop/hbase/io/HeapSize.java
+++ b/src/java/org/apache/hadoop/hbase/io/HeapSize.java
@@ -71,7 +71,7 @@ public interface HeapSize {
   static final int BLOCK_SIZE_TAX = 8;

   static final int BYTE_BUFFER = 56;
-  
+
   /**
    * @return Approximate 'exclusive deep size' of implementing object. Includes
    * count of payload and hosting object sizings.
diff --git a/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 0d80121..83e216b 100644
--- a/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -154,7 +154,7 @@ public class HFile {
     // Name for this object used when logging or in toString. Is either
     // the result of a toString on stream or else toString of passed file Path.
-    private String name;
+    protected String name;

     // Total uncompressed bytes, maybe calculate a compression ratio later.
     private int totalBytes = 0;
@@ -217,7 +217,7 @@ public class HFile {
      */
     public Writer(FileSystem fs, Path path)
     throws IOException {
-      this(fs, path, DEFAULT_BLOCKSIZE, null, null, false);
+      this(fs, path, DEFAULT_BLOCKSIZE, (Compression.Algorithm) null, null);
     }
@@ -236,7 +236,7 @@ public class HFile {
       this(fs, path, blocksize,
         compress == null? DEFAULT_COMPRESSION_ALGORITHM:
           Compression.getCompressionAlgorithmByName(compress),
-        comparator, false);
+        comparator);
     }

     /**
@@ -246,15 +246,13 @@ public class HFile {
      * @param blocksize
      * @param compress
      * @param comparator
-     * @param bloomfilter
      * @throws IOException
      */
     public Writer(FileSystem fs, Path path, int blocksize,
       Compression.Algorithm compress,
-      final RawComparator comparator,
-      final boolean bloomfilter)
+      final RawComparator comparator)
     throws IOException {
-      this(fs.create(path), blocksize, compress, comparator, bloomfilter);
+      this(fs.create(path), blocksize, compress, comparator);
       this.closeOutputStream = true;
       this.name = path.toString();
       this.path = path;
@@ -269,26 +267,22 @@ public class HFile {
      * @throws IOException
      */
     public Writer(final FSDataOutputStream ostream, final int blocksize,
-      final String compress, final RawComparator c)
+        final String compress, final RawComparator c)
     throws IOException {
       this(ostream, blocksize,
-        compress == null? DEFAULT_COMPRESSION_ALGORITHM:
-          Compression.getCompressionAlgorithmByName(compress), c, false);
+        Compression.getCompressionAlgorithmByName(compress), c);
     }
-    
+
     /**
      * Constructor that takes a stream.
      * @param ostream Stream to use.
     * @param blocksize
     * @param compress
     * @param c
-     * @param bloomfilter
     * @throws IOException
     */
    public Writer(final FSDataOutputStream ostream, final int blocksize,
-      final Compression.Algorithm compress,
-      final RawComparator c,
-      final boolean bloomfilter)
+      final Compression.Algorithm compress, final RawComparator c)
    throws IOException {
      this.outputStream = ostream;
      this.closeOutputStream = false;
@@ -726,11 +720,11 @@ public class HFile {
    }

    protected String toStringFirstKey() {
-      return Bytes.toString(getFirstKey());
+      return Bytes.toStringBinary(getFirstKey());
    }

    protected String toStringLastKey() {
-      return Bytes.toString(getFirstKey());
+      return Bytes.toStringBinary(getLastKey());
    }

    public long length() {
@@ -1189,7 +1183,7 @@ public class HFile {
      }

      public String getKeyString() {
-        return Bytes.toString(block.array(), block.arrayOffset() +
+        return Bytes.toStringBinary(block.array(), block.arrayOffset() +
          block.position(), currKeyLen);
      }
@@ -1240,6 +1234,10 @@ public class HFile {
          }
        }
      }
+
+    public String getTrailerInfo() {
+      return trailer.toString();
+    }
  }
  /*
   * The RFile has a fixed trailer which contains offsets to other variable
@@ -1267,10 +1265,8 @@ public class HFile {
    static int trailerSize() {
      // Keep this up to date...
-      final int intSize = 4;
-      final int longSize = 8;
      return
-      ( intSize * 5 ) +
-      ( longSize * 4 ) +
+      ( Bytes.SIZEOF_INT * 5 ) +
+      ( Bytes.SIZEOF_LONG * 4 ) +
      TRAILERBLOCKMAGIC.length;
    }
diff --git a/src/java/org/apache/hadoop/hbase/mapred/TableSplit.java b/src/java/org/apache/hadoop/hbase/mapred/TableSplit.java
index 435e2a7..b754b6a 100644
--- a/src/java/org/apache/hadoop/hbase/mapred/TableSplit.java
+++ b/src/java/org/apache/hadoop/hbase/mapred/TableSplit.java
@@ -103,7 +103,7 @@ public class TableSplit implements InputSplit, Comparable {
   @Override
   public String toString() {
     return m_regionLocation + ":" +
-      Bytes.toString(m_startRow) + "," + Bytes.toString(m_endRow);
+      Bytes.toStringBinary(m_startRow) + "," + Bytes.toStringBinary(m_endRow);
   }

   public int compareTo(TableSplit o) {
diff --git a/src/java/org/apache/hadoop/hbase/master/BaseScanner.java b/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
index 0297bd3..799c577 100644
--- a/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
+++ b/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
@@ -328,8 +334,9 @@ abstract class BaseScanner extends Chore implements HConstants {
     }

     if (LOG.isDebugEnabled()) {
-      LOG.debug(split.getRegionNameAsString() +
-        " no longer has references to " + Bytes.toString(parent));
+      LOG.debug(split.getRegionNameAsString() + "/" + split.getEncodedName() +
+        " no longer has references to " + Bytes.toStringBinary(parent)
+      );
     }

     Delete delete = new Delete(parent);
diff --git a/src/java/org/apache/hadoop/hbase/master/HMaster.java b/src/java/org/apache/hadoop/hbase/master/HMaster.java
index cb9295e..205611c 100644
--- a/src/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/src/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -951,6 +951,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
     }

     // Arguments are regionname and an optional server name.
     byte [] regionname = ((ImmutableBytesWritable)args[0]).get();
+    LOG.debug("Attempting to close region: " + Bytes.toStringBinary(regionname));
     String servername = null;
     if (args.length == 2) {
       servername = Bytes.toString(((ImmutableBytesWritable)args[1]).get());
diff --git a/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java b/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java
index 5537360..5c14a60 100644
--- a/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java
+++ b/src/java/org/apache/hadoop/hbase/master/ProcessRegionClose.java
@@ -53,7 +53,7 @@ class ProcessRegionClose extends ProcessRegionStatusChange {
   @Override
   public String toString() {
     return "ProcessRegionClose of " + this.regionInfo.getRegionNameAsString() +
-      ", " + this.offlineRegion;
+      ", " + this.offlineRegion + ", reassign: " + this.reassignRegion;
   }

   @Override
@@ -83,8 +83,12 @@ class ProcessRegionClose extends ProcessRegionStatusChange {
       result = result == null ? true : result;
     } else if (reassignRegion) {
+      LOG.info("region set as unassigned: " + regionInfo.getRegionNameAsString());
       // we are reassigning the region eventually, so set it unassigned
       master.regionManager.setUnassigned(regionInfo, false);
+    } else {
+      LOG.info("Region was neither offlined nor asked to be reassigned: " +
+        regionInfo.getRegionNameAsString());
     }

     return result == null ? true : result;
diff --git a/src/java/org/apache/hadoop/hbase/master/RegionManager.java b/src/java/org/apache/hadoop/hbase/master/RegionManager.java
index 6c65e2a..f50a64b 100644
--- a/src/java/org/apache/hadoop/hbase/master/RegionManager.java
+++ b/src/java/org/apache/hadoop/hbase/master/RegionManager.java
@@ -499,7 +499,7 @@ class RegionManager implements HConstants {
       if (currentRegion.isRootRegion() || currentRegion.isMetaTable()) {
         continue;
       }
-      String regionName = currentRegion.getRegionNameAsString();
+      final String regionName = currentRegion.getRegionNameAsString();
       if (regionIsInTransition(regionName)) {
         skipped++;
         continue;
diff --git a/src/java/org/apache/hadoop/hbase/master/ServerManager.java b/src/java/org/apache/hadoop/hbase/master/ServerManager.java
index bd462f4..47a31e2 100644
--- a/src/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/src/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -475,8 +475,8 @@ class ServerManager implements HConstants {
     // Should we tell it close regions because its overloaded?  If its
     // currently opening regions, leave it alone till all are open.
- LOG.debug("Process all wells: " + serverInfo + " openingCount: " + openingCount + - ", nobalancingCount: " + nobalancingCount); +// LOG.debug("Process all wells: " + serverInfo + " openingCount: " + openingCount + +// ", nobalancingCount: " + nobalancingCount); if ((openingCount < this.nobalancingCount)) { this.master.regionManager.assignRegions(serverInfo, mostLoadedRegions, returnMsgs); diff --git a/src/java/org/apache/hadoop/hbase/master/TableDelete.java b/src/java/org/apache/hadoop/hbase/master/TableDelete.java index 526fe32..0bde1b1 100644 --- a/src/java/org/apache/hadoop/hbase/master/TableDelete.java +++ b/src/java/org/apache/hadoop/hbase/master/TableDelete.java @@ -45,6 +45,7 @@ class TableDelete extends TableOperation { protected void processScanItem(String serverName, final HRegionInfo info) throws IOException { if (isEnabled(info)) { + LOG.debug("Region still enabled: " + info.toString()); throw new TableNotDisabledException(tableName); } } diff --git a/src/java/org/apache/hadoop/hbase/master/TableOperation.java b/src/java/org/apache/hadoop/hbase/master/TableOperation.java index bf24065..f882a8e 100644 --- a/src/java/org/apache/hadoop/hbase/master/TableOperation.java +++ b/src/java/org/apache/hadoop/hbase/master/TableOperation.java @@ -95,7 +95,7 @@ abstract class TableOperation implements HConstants { emptyRows.add(values.getRow()); LOG.error(Bytes.toString(CATALOG_FAMILY) + ":" + Bytes.toString(REGIONINFO_QUALIFIER) + " not found on " + - Bytes.toString(values.getRow())); + Bytes.toStringBinary(values.getRow())); continue; } String serverAddress = diff --git a/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 6b5596b..5da31c7 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -114,14 +114,14 @@ class CompactSplitThread extends Thread implements HConstants { continue; } catch (IOException ex) { LOG.error("Compaction/Split failed" + - (r != null ? (" for region " + Bytes.toString(r.getRegionName())) : ""), + (r != null ? (" for region " + r.getRegionNameAsString()) : ""), RemoteExceptionHandler.checkIOException(ex)); if (!server.checkFileSystem()) { break; } } catch (Exception ex) { LOG.error("Compaction failed" + - (r != null ? (" for region " + Bytes.toString(r.getRegionName())) : ""), + (r != null ? (" for region " + r.getRegionNameAsString()) : ""), ex); if (!server.checkFileSystem()) { break; @@ -155,7 +155,7 @@ class CompactSplitThread extends Thread implements HConstants { r.setForceMajorCompaction(force); if (LOG.isDebugEnabled()) { LOG.debug("Compaction " + (force? "(major) ": "") + - "requested for region " + Bytes.toString(r.getRegionName()) + + "requested for region " + r.getRegionNameAsString() + "/" + r.getRegionInfo().getEncodedName() + (why != null && !why.isEmpty()? 
" because: " + why: "")); } diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java index d4a82e5..f594129 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -518,6 +518,11 @@ public class HRegion implements HConstants { // , Writable{ return this.regionInfo.getRegionName(); } + /** @return region name as string for logging */ + public String getRegionNameAsString() { + return this.regionInfo.getRegionNameAsString(); + } + /** @return HTableDescriptor for this region */ public HTableDescriptor getTableDesc() { return this.regionInfo.getTableDesc(); @@ -1017,6 +1022,7 @@ public class HRegion implements HConstants { // , Writable{ Store store = getStore(family); KeyValue kv = new KeyValue(row, HConstants.LATEST_TIMESTAMP); // get the closest key. (HStore.getRowKeyAtOrBefore can return null) + LOG.debug("getClosestRowBefore looking for: " + Bytes.toStringBinary(row)); key = store.getRowKeyAtOrBefore(kv); if (key == null) { return null; diff --git a/src/java/org/apache/hadoop/hbase/regionserver/Store.java b/src/java/org/apache/hadoop/hbase/regionserver/Store.java index ab93230..25265b4 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -543,7 +543,7 @@ public class Store implements HConstants { */ private HFile.Writer getWriter(final Path basedir) throws IOException { return StoreFile.getWriter(this.fs, basedir, this.blocksize, - this.compression, this.comparator.getRawComparator(), this.bloomfilter); + this.compression, this.comparator.getRawComparator()); } /* diff --git a/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 453ac9c..a4810e3 100644 --- a/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -19,18 +19,11 @@ */ package org.apache.hadoop.hbase.regionserver; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.util.Map; -import java.util.Random; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -42,6 +35,14 @@ import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.LruBlockCache; import org.apache.hadoop.hbase.util.Bytes; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + /** * A Store data file. Stores usually have one or more of these files. They * are produced by flushing the memcache to disk. To @@ -81,7 +82,7 @@ public class StoreFile implements HConstants { // If true, this file was product of a major compaction. Its then set // whenever you get a Reader. private AtomicBoolean majorCompaction = null; - + /* * Regex that will work for straight filenames and for reference names. 
    * If reference, then the regex has more than just one group. Group 1 is
@@ -275,9 +276,12 @@ public class StoreFile implements HConstants {
         this.majorCompaction.set(mc);
       }
     }
+
+    // TODO read in bloom filter here, ignore if the column family config says
+    // "no bloom filter" even if there is one in the hfile.
     return this.reader;
   }
-  
+
   /**
    * Override to add some customization on HFile.Reader
    */
@@ -405,7 +409,7 @@ public class StoreFile implements HConstants {
    */
   public static HFile.Writer getWriter(final FileSystem fs, final Path dir)
   throws IOException {
-    return getWriter(fs, dir, DEFAULT_BLOCKSIZE_SMALL, null, null, false);
+    return getWriter(fs, dir, DEFAULT_BLOCKSIZE_SMALL, null, null);
   }

   /**
@@ -418,13 +422,12 @@ public class StoreFile implements HConstants {
    * @param blocksize
    * @param algorithm Pass null to get default.
    * @param c Pass null to get default.
-   * @param filter BloomFilter
    * @return HFile.Writer
    * @throws IOException
    */
   public static HFile.Writer getWriter(final FileSystem fs, final Path dir,
-    final int blocksize, final Compression.Algorithm algorithm,
-    final KeyValue.KeyComparator c, final boolean filter)
+      final int blocksize, final Compression.Algorithm algorithm,
+      final KeyValue.KeyComparator c)
   throws IOException {
     if (!fs.exists(dir)) {
       fs.mkdirs(dir);
@@ -432,7 +435,7 @@ public class StoreFile implements HConstants {
     Path path = getUniqueFile(fs, dir);
     return new HFile.Writer(fs, path, blocksize,
       algorithm == null? HFile.DEFAULT_COMPRESSION_ALGORITHM: algorithm,
-      c == null? KeyValue.KEY_COMPARATOR: c, filter);
+      c == null? KeyValue.KEY_COMPARATOR: c);
   }

   /**
diff --git a/src/java/org/apache/hadoop/hbase/util/Bytes.java b/src/java/org/apache/hadoop/hbase/util/Bytes.java
index 4c9d9bb..95aa5a3 100644
--- a/src/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/src/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -24,6 +24,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
 import java.util.Comparator;

 import java.math.BigInteger;
@@ -32,6 +33,8 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.Log;

 /**
  * Utility class that handles byte arrays, conversions to/from other types,
@@ -265,6 +268,94 @@ public class Bytes {
     return result;
   }

+  public static String toStringBinary(final byte []b) {
+    return toStringBinary(b, 0, b.length);
+  }
+
+  public static String toStringBinary(final byte []b, int off, int len) {
+    String result = null;
+    try {
+      String first = new String(b, off, len, "ISO-8859-1");
+      result = "";
+      for (int i = 0; i < first.length() ; ++i ) {
+        int ch = first.charAt(i) & 0xFF;
+        if ( (ch >= '0' && ch <= '9')
+            || (ch >= 'A' && ch <= 'Z')
+            || (ch >= 'a' && ch <= 'z')
+            || ch == ','
+            || ch == '_'
+            || ch == '-'
+            || ch == ':'
+            || ch == '.') {
+          result += first.charAt(i);
+        } else {
+          result += String.format("\\x%02X", ch);
+        }
+      }
+    } catch ( UnsupportedEncodingException e) {
+      e.printStackTrace();
+    }
+    return result;
+  }
+
+  private static boolean isHexDigit(char c) {
+    return
+      (c >= 'A' && c <= 'F') ||
+      (c >= '0' && c <= '9');
+  }
+
+  /**
+   * Takes an ASCII digit in the range A-F0-9 and returns
+   * the corresponding integer/ordinal value.
+   * @param ch The hex digit.
+   * @return The converted hex value as a byte.
+   */
+  public static byte toBinaryFromHex(byte ch) {
+    if ( ch >= 'A' && ch <= 'F' )
+      return (byte) ((byte)10 + (byte) (ch - 'A'));
+    // else
+    return (byte) (ch - '0');
+  }
+
+  public static byte [] toBytesBinary(String in) {
+    // this may be bigger than we need, but let's be safe.
+    byte [] b = new byte[in.length()];
+    int size = 0;
+    for (int i = 0; i < in.length(); ++i) {
+      char ch = in.charAt(i);
+      if (ch == '\\') {
+        // begin hex escape:
+        char next = in.charAt(i+1);
+        if (next != 'x') {
+          // invalid escape sequence, ignore this one.
+          b[size++] = (byte)ch;
+          continue;
+        }
+        // ok, take next 2 hex digits.
+        char hd1 = in.charAt(i+2);
+        char hd2 = in.charAt(i+3);
+
+        // they need to be A-F0-9:
+        if ( ! isHexDigit(hd1) ||
+            ! isHexDigit(hd2) ) {
+          // bogus escape code, ignore:
+          continue;
+        }
+        // turn hex ASCII digit -> number
+        byte d = (byte) ((toBinaryFromHex((byte)hd1) << 4) + toBinaryFromHex((byte)hd2));
+
+        b[size++] = d;
+        i += 3; // skip 3
+      } else {
+        b[size++] = (byte) ch;
+      }
+    }
+    // resize:
+    byte [] b2 = new byte[size];
+    System.arraycopy(b, 0, b2, 0, size);
+    return b2;
+  }
+
   /**
    * Converts a string to a UTF-8 byte array.
    * @param s
diff --git a/src/test/org/apache/hadoop/hbase/TestTable.java b/src/test/org/apache/hadoop/hbase/TestTable.java
index 2100280..c3be7dc 100644
--- a/src/test/org/apache/hadoop/hbase/TestTable.java
+++ b/src/test/org/apache/hadoop/hbase/TestTable.java
@@ -110,8 +110,8 @@ public class TestTable extends HBaseClusterTestCase {
     }
     // All threads are now dead.  Count up how many tables were created and
     // how many failed w/ appropriate exception.
-    assertTrue(successes.get() == 1);
-    assertTrue(failures.get() == (count - 1));
+    assertEquals(1, successes.get());
+    assertEquals(count - 1, failures.get());
   }

   /**
diff --git a/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
index ed589f8..a1ad2f9 100644
--- a/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
+++ b/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
@@ -114,7 +114,7 @@ public class TestHFile extends HBaseTestCase {
     Path ncTFile = new Path(ROOT_DIR, "basic.hfile");
     FSDataOutputStream fout = createFSOutput(ncTFile);
     Writer writer = new Writer(fout, minBlockSize,
-      Compression.getCompressionAlgorithmByName(codec), null, false);
+      Compression.getCompressionAlgorithmByName(codec), null);
     LOG.info(writer);
     writeRecords(writer);
     fout.close();
@@ -178,7 +178,7 @@ public class TestHFile extends HBaseTestCase {
     Path mFile = new Path(ROOT_DIR, "meta.hfile");
     FSDataOutputStream fout = createFSOutput(mFile);
     Writer writer = new Writer(fout, minBlockSize,
-      Compression.getCompressionAlgorithmByName(compress), null, false);
+      Compression.getCompressionAlgorithmByName(compress), null);
     someTestingWithMetaBlock(writer);
     writer.close();
     fout.close();
@@ -204,7 +204,7 @@ public class TestHFile extends HBaseTestCase {
     Path mFile = new Path(ROOT_DIR, "nometa.hfile");
     FSDataOutputStream fout = createFSOutput(mFile);
     Writer writer = new Writer(fout, minBlockSize,
-      Compression.Algorithm.NONE, null, false);
+      Compression.Algorithm.NONE, null);
     writer.append("foo".getBytes(), "value".getBytes());
     writer.close();
     fout.close();
@@ -226,7 +226,7 @@ public class TestHFile extends HBaseTestCase {
   public void testComparator() throws IOException {
     Path mFile = new Path(ROOT_DIR, "meta.tfile");
     FSDataOutputStream fout = createFSOutput(mFile);
-    Writer writer = new Writer(fout, minBlockSize, null,
+    Writer writer = new Writer(fout, minBlockSize, (Compression.Algorithm) null,
       new RawComparator() {
         @Override
         public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2,
@@ -238,7 +238,7 @@ public class TestHFile extends HBaseTestCase {
         public int compare(byte[] o1, byte[] o2) {
           return compare(o1, 0, o1.length, o2, 0, o2.length);
         }
-      }, false);
+      });
     writer.append("3".getBytes(), "0".getBytes());
     writer.append("2".getBytes(), "0".getBytes());
     writer.append("1".getBytes(), "0".getBytes());
diff --git a/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
index 9f6601b..fd8d6b9 100644
--- a/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
+++ b/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
@@ -19,9 +19,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;

-import java.io.IOException;
-import java.nio.ByteBuffer;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
@@ -34,6 +31,10 @@
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
 /**
  * Test HStoreFile
  */
@@ -71,11 +72,11 @@ public class TestStoreFile extends HBaseTestCase {
     // Make up a directory hierarchy that has a regiondir and familyname.
     HFile.Writer writer = StoreFile.getWriter(this.fs,
       new Path(new Path(this.testDir, "regionname"), "familyname"),
-      2 * 1024, null, null, false);
+      2 * 1024, null, null);
     writeStoreFile(writer);
     checkHalfHFile(new StoreFile(this.fs, writer.getPath(), conf));
   }
-  
+
   /*
    * Writes HStoreKey and ImmutableBytes data to passed writer and
    * then closes it.
@@ -110,7 +111,7 @@ public class TestStoreFile extends HBaseTestCase {
     Path dir = new Path(storedir, "1234567890");
     // Make a store file and write data to it.
     HFile.Writer writer = StoreFile.getWriter(this.fs, dir, 8 * 1024, null,
-      null, false);
+      null);
     writeStoreFile(writer);
     StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf);
     HFile.Reader reader = hsf.getReader();
diff --git a/src/webapps/master/master.jsp b/src/webapps/master/master.jsp
index f4940cf..0515ec1 100644
--- a/src/webapps/master/master.jsp
+++ b/src/webapps/master/master.jsp
@@ -147,7 +147,7 @@ $(document).ready(function(){
   Arrays.sort(serverNames);
   for (String serverName: serverNames) {
     HServerInfo hsi = serverToServerInfos.get(serverName);
-    String hostname = hsi.getName() + ":" + hsi.getInfoPort();
+    String hostname = hsi.getServerAddress().getInetSocketAddress().getAddress().getHostAddress() + ":" + hsi.getInfoPort();
     String url = "http://" + hostname + "/";
     totalRegions += hsi.getLoad().getNumberOfRegions();
     totalRequests += hsi.getLoad().getNumberOfRequests() / interval;
diff --git a/src/webapps/master/regionhistorian.jsp b/src/webapps/master/regionhistorian.jsp
index efbc99f..ff150e3 100644
--- a/src/webapps/master/regionhistorian.jsp
+++ b/src/webapps/master/regionhistorian.jsp
@@ -5,10 +5,12 @@
   import="org.apache.hadoop.hbase.RegionHistorian"
   import="org.apache.hadoop.hbase.master.HMaster"
   import="org.apache.hadoop.hbase.RegionHistorian.RegionHistoryInformation"
-  import="org.apache.hadoop.hbase.HConstants"%><%
-  String regionName = request.getParameter("regionname");
+  import="org.apache.hadoop.hbase.HConstants"%>
+<%@ page import="org.apache.hadoop.hbase.util.Bytes" %>
+<%
+  String regionName = request.getParameter("regionname");
   HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
-  List informations = RegionHistorian.getInstance().getRegionHistory(regionName);
+  List informations = RegionHistorian.getInstance().getRegionHistory(Bytes.toBytesBinary(regionName));
   // Pattern used so we can wrap a regionname in an href.
   Pattern pattern = Pattern.compile(RegionHistorian.SPLIT_PREFIX + "(.*)$");
 %>
diff --git a/src/webapps/master/table.jsp b/src/webapps/master/table.jsp
index ccc37c3..291013b 100644
--- a/src/webapps/master/table.jsp
+++ b/src/webapps/master/table.jsp
@@ -15,6 +15,8 @@
   import="org.apache.hadoop.hbase.master.HMaster"
   import="org.apache.hadoop.hbase.master.MetaRegion"
   import="org.apache.hadoop.hbase.util.Bytes"
+  import="java.io.IOException"
+  import="java.util.Map"
   import="org.apache.hadoop.hbase.HConstants"%><%
   HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
   HBaseConfiguration conf = master.getConfiguration();
@@ -38,7 +40,6 @@
 if ( action != null ) {
 %>
-
@@ -132,17 +133,17 @@
         hriEntry.getValue()).getInfoPort();

     String urlRegionHistorian =
-      "/regionhistorian.jsp?regionname=" +
-      URLEncoder.encode(hriEntry.getKey().getRegionNameAsString(), "UTF-8");
+      "/regionhistorian.jsp?regionname="+
+      Bytes.toStringBinary(hriEntry.getKey().getRegionName());

     String urlRegionServer =
       "http://" + hriEntry.getValue().getHostname().toString() + ":" + infoPort + "/";
 %>
-<%= hriEntry.getKey().getRegionNameAsString()%>
+<%= Bytes.toStringBinary(hriEntry.getKey().getRegionName())%>
 <%= hriEntry.getValue().toString() %>
-<%= hriEntry.getKey().getEncodedName()%> <%= Bytes.toString(hriEntry.getKey().getStartKey())%>
-<%= Bytes.toString(hriEntry.getKey().getEndKey())%>
+<%= hriEntry.getKey().getEncodedName()%> <%= Bytes.toStringBinary(hriEntry.getKey().getStartKey())%>
+<%= Bytes.toStringBinary(hriEntry.getKey().getEndKey())%>
 <% } %>
diff --git a/src/webapps/regionserver/regionserver.jsp b/src/webapps/regionserver/regionserver.jsp
index c804670..2e13615 100644
--- a/src/webapps/regionserver/regionserver.jsp
+++ b/src/webapps/regionserver/regionserver.jsp
@@ -48,7 +48,7 @@
     HServerLoad.RegionLoad load = regionServer.createRegionLoad(r.getRegionName());
 %>
 <%= r.getRegionNameAsString() %><%= r.getEncodedName() %>
-<%= Bytes.toString(r.getStartKey()) %><%= Bytes.toString(r.getEndKey()) %>
+<%= Bytes.toStringBinary(r.getStartKey()) %><%= Bytes.toStringBinary(r.getEndKey()) %>
 <%= load.toString() %>
 <% } %>
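
Reviewer note, not part of the patch: the heart of this change is the new Bytes.toStringBinary()/toBytesBinary() pair, which makes binary row keys and region names printable in logs and JSPs, and parseable on the way back in. A minimal sketch of the intended round-trip contract, assuming the patched Bytes class is on the classpath; the class name and key bytes below are made up for illustration.

import org.apache.hadoop.hbase.util.Bytes;

public class ToStringBinaryExample {
  public static void main(String[] args) {
    // A row key with embedded non-printable bytes; plain Bytes.toString()
    // (UTF-8) renders these unreadably in a log line or JSP.
    byte [] rowKey = new byte [] { 'u', 's', 'e', 'r', 0x00, (byte) 0xFF, '1' };

    // Printable ASCII ([0-9A-Za-z] plus , _ - : .) passes through untouched;
    // every other byte is escaped as \xHH with two uppercase hex digits.
    String printable = Bytes.toStringBinary(rowKey);
    System.out.println(printable); // prints: user\x00\xFF1

    // toBytesBinary() parses the \xHH escapes back, so the pair round-trips:
    // a region name copied out of a log line or a regionhistorian.jsp URL can
    // be handed back to the API as the original bytes.
    byte [] roundTripped = Bytes.toBytesBinary(printable);
    System.out.println(Bytes.compareTo(rowKey, roundTripped) == 0); // true
  }
}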
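
A second note on the writer API change: every HFile.Writer and StoreFile.getWriter() overload loses its trailing bloomfilter boolean, with bloom-filter wiring deferred per the TODO left in StoreFile.getReader(). A sketch of post-patch construction, assuming HFile.DEFAULT_BLOCKSIZE and KeyValue.KEY_COMPARATOR are accessible as they are used elsewhere in these hunks; the local path is made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;

public class WriterAfterPatch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // Before: new HFile.Writer(fs, path, blocksize, algorithm, comparator, false)
    // After: the trailing boolean is gone; everything else is unchanged.
    HFile.Writer writer = new HFile.Writer(fs, new Path("/tmp/example.hfile"),
      HFile.DEFAULT_BLOCKSIZE, Compression.Algorithm.NONE, KeyValue.KEY_COMPARATOR);
    writer.append("row".getBytes(), "value".getBytes());
    writer.close();
  }
}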