Index: src/test/java/org/apache/hadoop/hbase/client/TestOperation.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/client/TestOperation.java	(revision 0)
+++ src/test/java/org/apache/hadoop/hbase/client/TestOperation.java	(revision 0)
@@ -0,0 +1,130 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.codehaus.jackson.map.ObjectMapper;
+
+/**
+ * Run tests that use the functionality of the Operation superclass for
+ * Puts, Gets, Deletes, Scans, and MultiPuts.
+ */
+public class TestOperation {
+  private static byte [] ROW = Bytes.toBytes("testRow");
+  private static byte [] FAMILY = Bytes.toBytes("testFamily");
+  private static byte [] QUALIFIER = Bytes.toBytes("testQualifier");
+  private static byte [] VALUE = Bytes.toBytes("testValue");
+
+  private static ObjectMapper mapper = new ObjectMapper();
+
+  /**
+   * Test the client Operations' JSON encoding to ensure that produced JSON is
+   * parseable and that the details are present and not corrupted.
+   * @throws IOException
+   */
+  @Test
+  public void testOperationJSON() throws IOException {
+    // produce a Scan Operation
+    Scan scan = new Scan(ROW);
+    scan.addColumn(FAMILY, QUALIFIER);
+    // get its JSON representation, and parse it
+    String json = scan.toJSON();
+    Map parsedJSON = mapper.readValue(json, HashMap.class);
+    // check for the startRow
+    assertEquals("startRow incorrect in Scan.toJSON()",
+        Bytes.toStringBinary(ROW), parsedJSON.get("startRow"));
+    // check for the family and the qualifier
+    List familyInfo = (List) ((Map) parsedJSON.get("families")).get(
+        Bytes.toStringBinary(FAMILY));
+    assertNotNull("Family absent in Scan.toJSON()", familyInfo);
+    assertEquals("Qualifier absent in Scan.toJSON()", 1, familyInfo.size());
+    assertEquals("Qualifier incorrect in Scan.toJSON()",
+        Bytes.toStringBinary(QUALIFIER), familyInfo.get(0));
+
+    // produce a Get Operation
+    Get get = new Get(ROW);
+    get.addColumn(FAMILY, QUALIFIER);
+    // get its JSON representation, and parse it
+    json = get.toJSON();
+    parsedJSON = mapper.readValue(json, HashMap.class);
+    // check for the row
+    assertEquals("row incorrect in Get.toJSON()",
+        Bytes.toStringBinary(ROW), parsedJSON.get("row"));
+    // check for the family and the qualifier
+    familyInfo = (List) ((Map) parsedJSON.get("families")).get(
+        Bytes.toStringBinary(FAMILY));
+    assertNotNull("Family absent in Get.toJSON()", familyInfo);
+    assertEquals("Qualifier absent in Get.toJSON()", 1, familyInfo.size());
+    assertEquals("Qualifier incorrect in Get.toJSON()",
+        Bytes.toStringBinary(QUALIFIER), familyInfo.get(0));
+
+    // produce a Put operation
+    Put put = new Put(ROW);
+    put.add(FAMILY, QUALIFIER, VALUE);
+    // get its JSON representation, and parse it
+    json = put.toJSON();
+    parsedJSON = mapper.readValue(json, HashMap.class);
+    // check for the row
+    assertEquals("row incorrect in Put.toJSON()",
+        Bytes.toStringBinary(ROW), parsedJSON.get("row"));
+    // check for the family and the qualifier
+    familyInfo = (List) ((Map) parsedJSON.get("families")).get(
+        Bytes.toStringBinary(FAMILY));
+    assertNotNull("Family absent in Put.toJSON()", familyInfo);
+    assertEquals("KeyValue absent in Put.toJSON()", 1, familyInfo.size());
+    Map kvMap = (Map) familyInfo.get(0);
+    assertEquals("Qualifier incorrect in Put.toJSON()",
+        Bytes.toStringBinary(QUALIFIER), kvMap.get("qualifier"));
+    assertEquals("Value length incorrect in Put.toJSON()",
+        VALUE.length, kvMap.get("vlen"));
+
+    // produce a Delete operation
+    Delete delete = new Delete(ROW);
+    delete.deleteColumn(FAMILY, QUALIFIER);
+    // get its JSON representation, and parse it
+    json = delete.toJSON();
+    parsedJSON = mapper.readValue(json, HashMap.class);
+    // check for the row
+    assertEquals("row incorrect in Delete.toJSON()",
+        Bytes.toStringBinary(ROW), parsedJSON.get("row"));
+    // check for the family and the qualifier
+    familyInfo = (List) ((Map) parsedJSON.get("families")).get(
+        Bytes.toStringBinary(FAMILY));
+    assertNotNull("Family absent in Delete.toJSON()", familyInfo);
+    assertEquals("KeyValue absent in Delete.toJSON()", 1, familyInfo.size());
+    kvMap = (Map) familyInfo.get(0);
+    assertEquals("Qualifier incorrect in Delete.toJSON()",
+        Bytes.toStringBinary(QUALIFIER), kvMap.get("qualifier"));
+  }
+}
Index: src/main/java/org/apache/hadoop/hbase/KeyValue.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/KeyValue.java	(revision 1160468)
+++ src/main/java/org/apache/hadoop/hbase/KeyValue.java	(working copy)
@@ -629,6 +629,7 @@
     stringMap.put("family", Bytes.toStringBinary(getFamily()));
     stringMap.put("qualifier", Bytes.toStringBinary(getQualifier()));
     stringMap.put("timestamp", getTimestamp());
+    stringMap.put("vlen", getValueLength());
     return stringMap;
   }
 
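Note for reviewers: the new "vlen" entry exposes the value's length while keeping the value payload itself out of logs. A minimal sketch of what a consumer of KeyValue.toStringMap() would see; the demo class name and literals below are ours, not part of this patch:

    import java.util.Map;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class KeyValueMapDemo {
      public static void main(String[] args) {
        KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("f"),
            Bytes.toBytes("q"), 1L, Bytes.toBytes("some value"));
        // toStringMap() reports "vlen" (the value length in bytes) rather
        // than the value itself, so log lines stay small
        Map stringMap = kv.toStringMap();
        System.out.println(stringMap.get("vlen"));  // prints 10
      }
    }
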
Index: src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java	(revision 1160468)
+++ src/main/java/org/apache/hadoop/hbase/ipc/WritableRpcEngine.java	(working copy)
@@ -35,7 +35,11 @@
 
 import org.apache.commons.logging.*;
 
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.Operation;
 import org.apache.hadoop.hbase.io.HbaseObjectWritable;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Objects;
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.ipc.VersionedProtocol;
@@ -43,6 +47,8 @@
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.conf.*;
 
+import org.codehaus.jackson.map.ObjectMapper;
+
 /** An RpcEngine implementation for Writable data. */
 class WritableRpcEngine implements RpcEngine {
   // LOG is NOT in hbase subpackage intentionally so that the default HBase
@@ -246,6 +252,21 @@
     private boolean verbose;
     private boolean authorize = false;
 
+    // for JSON encoding
+    private static ObjectMapper mapper = new ObjectMapper();
+
+    private static final String WARN_RESPONSE_TIME =
+        "hbase.ipc.warn.response.time";
+    private static final String WARN_RESPONSE_SIZE =
+        "hbase.ipc.warn.response.size";
+
+    /** Default values for the above params */
+    private static final int DEFAULT_WARN_RESPONSE_TIME = 1000; // milliseconds
+    private static final int DEFAULT_WARN_RESPONSE_SIZE = 100 * 1024 * 1024;
+
+    private final int warnResponseTime;
+    private final int warnResponseSize;
+
     private static String classNameBase(String className) {
       String[] names = className.split("\\.", -1);
       if (names == null || names.length == 0) {
@@ -282,6 +303,11 @@
       this.authorize =
         conf.getBoolean(
           ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false);
+
+      this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME,
+          DEFAULT_WARN_RESPONSE_TIME);
+      this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE,
+          DEFAULT_WARN_RESPONSE_SIZE);
     }
 
     @Override
@@ -326,8 +352,38 @@
         rpcMetrics.inc(call.getMethodName(), processingTime);
         if (verbose) log("Return: "+value);
 
-        return new HbaseObjectWritable(method.getReturnType(), value);
+        HbaseObjectWritable retVal =
+          new HbaseObjectWritable(method.getReturnType(), value);
+        long responseSize = retVal.getWritableSize();
+        // log any RPC responses that are slower than the configured warn
+        // response time or larger than the configured warn response size
+        boolean tooSlow = (processingTime > warnResponseTime
+            && warnResponseTime > -1);
+        boolean tooLarge = (responseSize > warnResponseSize
+            && warnResponseSize > -1);
+        if (tooSlow || tooLarge) {
+          // when tagging, we let TooLarge trump TooSlow to keep output simple
+          // note that large responses will often also be slow.
+          logResponse(call, (tooLarge ? "TooLarge" : "TooSlow"),
+              startTime, processingTime, qTime, responseSize);
+          if (tooSlow) {
+            // increment global slow RPC response counter
+            rpcMetrics.inc("slowResponseCount", 1);
+          }
+          if (tooLarge) {
+            // increment global large RPC response counter
+            rpcMetrics.inc("largeResponseCount", 1);
+          }
+        }
+        if (processingTime > 1000) {
+          // we use a hard-coded one second period so that we can clearly
+          // indicate the time period we're warning about in the name of the
+          // metric itself
+          rpcMetrics.inc(call.getMethodName() + ".aboveOneSec.",
+              processingTime);
+        }
+        return retVal;
       } catch (InvocationTargetException e) {
         Throwable target = e.getTargetException();
         if (target instanceof IOException) {
@@ -345,6 +401,58 @@
         throw ioe;
       }
     }
+
+    /**
+     * Logs an RPC response to the LOG file, producing valid JSON objects for
+     * client Operations.
+     * @param call The call to log.
+     * @param tag The tag that will be used to indicate this event in the log.
+     * @param startTime The time that the call was initiated, in ms.
+     * @param processingTime The duration that the call took to run, in ms.
+     * @param qTime The duration that the call spent on the queue
+     *     prior to being initiated, in ms.
+     * @param responseSize The size in bytes of the response buffer.
+     */
+    private void logResponse(Invocation call, String tag,
+        long startTime, int processingTime, int qTime, long responseSize)
+        throws IOException {
+      Object params[] = call.getParameters();
+      // base information that is reported regardless of type of call
+      Map<String, Object> responseInfo = new HashMap<String, Object>();
+      responseInfo.put("starttimemillis", startTime);
+      responseInfo.put("processingtimemillis", processingTime);
+      responseInfo.put("queuetimemillis", qTime);
+      responseInfo.put("responsesize", responseSize);
+      responseInfo.put("class", instance.getClass().getSimpleName());
+      responseInfo.put("method", call.getMethodName());
+      if (params.length == 2 && instance instanceof HRegionServer &&
+          params[0] instanceof byte[] &&
+          params[1] instanceof Operation) {
+        // if the slow process is a query, we want to log its table as well
+        // as its own fingerprint
+        byte [] tableName =
+          HRegionInfo.parseRegionName((byte[]) params[0])[0];
+        responseInfo.put("table", Bytes.toStringBinary(tableName));
+        // annotate the response map with operation details
+        responseInfo.putAll(((Operation) params[1]).toMap());
+        // report to the log file
+        LOG.warn("(operation" + tag + "): " +
+            mapper.writeValueAsString(responseInfo));
+      } else if (params.length == 1 && instance instanceof HRegionServer &&
+          params[0] instanceof Operation) {
+        // annotate the response map with operation details
+        responseInfo.putAll(((Operation) params[0]).toMap());
+        // report to the log file
+        LOG.warn("(operation" + tag + "): " +
+            mapper.writeValueAsString(responseInfo));
+      } else {
+        // can't get JSON details, so just report call.toString() along with
+        // a more generic tag.
+        responseInfo.put("call", call.toString());
+        LOG.warn("(response" + tag + "): " +
+            mapper.writeValueAsString(responseInfo));
+      }
+    }
   }
 
   protected static void log(String value) {
Index: src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java	(revision 1160468)
+++ src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java	(working copy)
@@ -93,16 +93,8 @@
    */
   private static final int DEFAULT_MAX_QUEUE_SIZE_PER_HANDLER = 10;
 
-  private static final String WARN_RESPONSE_SIZE =
-      "hbase.ipc.warn.response.size";
-
-  /** Default value for above param */
-  private static final int DEFAULT_WARN_RESPONSE_SIZE = 100 * 1024 * 1024;
-
   static final int BUFFER_INITIAL_SIZE = 1024;
 
-  private final int warnResponseSize;
-
   private static final String WARN_DELAYED_CALLS =
       "hbase.ipc.warn.delayedrpc.number";
 
@@ -337,11 +329,6 @@
         LOG.warn("Error sending response to call: ", e);
       }
 
-      if (buf.size() > warnResponseSize) {
-        LOG.warn("responseTooLarge for: "+this+": Size: "
-            + StringUtils.humanReadableInt(buf.size()));
-      }
-
       this.response = buf.getByteBuffer();
     }
 
@@ -1328,8 +1315,6 @@
     this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", false);
     this.tcpKeepAlive = conf.getBoolean("ipc.server.tcpkeepalive", true);
 
-    this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE,
-        DEFAULT_WARN_RESPONSE_SIZE);
    this.warnDelayedCalls = conf.getInt(WARN_DELAYED_CALLS,
        DEFAULT_WARN_DELAYED_CALLS);
    this.delayedCalls = new AtomicInteger(0);
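The warn thresholds move out of HBaseServer and into the RPC engine, but keep the same configuration keys. A minimal sketch of tuning them through the server's Configuration (the values below are illustrative, not recommendations):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WarnThresholdDemo {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // flag responses slower than 500 ms instead of the 1000 ms default
        conf.setInt("hbase.ipc.warn.response.time", 500);
        // flag responses larger than 10 MB instead of the 100 MB default
        conf.setInt("hbase.ipc.warn.response.size", 10 * 1024 * 1024);
        // per the guards in the patch, a value of -1 disables either check
        System.out.println(conf.getInt("hbase.ipc.warn.response.time", -1));
      }
    }
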
Index: src/main/java/org/apache/hadoop/hbase/client/Operation.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/Operation.java	(revision 0)
+++ src/main/java/org/apache/hadoop/hbase/client/Operation.java	(revision 0)
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.codehaus.jackson.map.ObjectMapper;
+
+/**
+ * Superclass for any type that maps to a potentially application-level query.
+ * (e.g. Put, Get, Delete, Scan, Next, etc.)
+ * Contains methods for exposure to logging and debugging tools.
+ */
+public abstract class Operation {
+  // TODO make this configurable
+  private static final int DEFAULT_MAX_COLS = 5;
+
+  /**
+   * Produces a Map containing a fingerprint which identifies the type and
+   * the static schema components of a query (i.e. column families)
+   * @return a map containing fingerprint information (i.e. column families)
+   */
+  public abstract Map<String, Object> getFingerprint();
+
+  /**
+   * Produces a Map containing a summary of the details of a query
+   * beyond the scope of the fingerprint (i.e. columns, rows...)
+   * @param maxCols a limit on the number of columns output prior to truncation
+   * @return a map containing parameters of a query (i.e. rows, columns...)
+   */
+  public abstract Map<String, Object> toMap(int maxCols);
+
+  /**
+   * Produces a Map containing a full summary of a query.
+   * @return a map containing parameters of a query (i.e. rows, columns...)
+   */
+  public Map<String, Object> toMap() {
+    return toMap(DEFAULT_MAX_COLS);
+  }
+
+  /**
+   * Produces a JSON encoding of the fingerprint and details, suitable for
+   * machine parsing.
+   * @param maxCols a limit on the number of columns to include in the JSON
+   * @return this Operation's information, encoded as a JSON string
+   */
+  public String toJSON(int maxCols) throws IOException {
+    ObjectMapper mapper = new ObjectMapper();
+    return mapper.writeValueAsString(toMap(maxCols));
+  }
+
+  /**
+   * Produces a JSON encoding sufficient for description of a query
+   * in a debugging or logging context.
+   * @return the produced JSON, as a string
+   */
+  public String toJSON() throws IOException {
+    return toJSON(DEFAULT_MAX_COLS);
+  }
+
+  /**
+   * Produces a string representation of this Operation. It defaults to a JSON
+   * representation, but falls back to a string representation of the
+   * fingerprint and details in the case of a JSON encoding failure.
+   * @param maxCols a limit on the number of columns output in the summary
+   *     prior to truncation
+   * @return a JSON-parseable String
+   */
+  public String toString(int maxCols) {
+    /* for now this is merely a wrapper around producing a JSON string, but
+     * toJSON is kept separate in case this is changed to be a less parsable
+     * pretty-printed representation.
+     */
+    try {
+      return toJSON(maxCols);
+    } catch (IOException ioe) {
+      return toMap(maxCols).toString();
+    }
+  }
+
+  /**
+   * Produces a string representation of this Operation. It defaults to a JSON
+   * representation, but falls back to a string representation of the
+   * fingerprint and details in the case of a JSON encoding failure.
+   * @return String
+   */
+  @Override
+  public String toString() {
+    return toString(DEFAULT_MAX_COLS);
+  }
+}
+
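To illustrate the contract Operation imposes on its subclasses, here is a toy subclass; "NoopOperation" is hypothetical and exists only for this sketch. Once the two abstract methods are overridden, toMap(), toJSON(int), toJSON() and both toString() variants all come for free:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hbase.client.Operation;

    // hypothetical subclass, not part of this patch
    public class NoopOperation extends Operation {
      @Override
      public Map<String, Object> getFingerprint() {
        // static, schema-level identity only
        Map<String, Object> map = new HashMap<String, Object>();
        map.put("operation", "noop");
        return map;
      }

      @Override
      public Map<String, Object> toMap(int maxCols) {
        // fingerprint plus instance-level detail
        Map<String, Object> map = getFingerprint();
        map.put("detail", "none");
        return map;
      }
    }
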
Index: src/main/java/org/apache/hadoop/hbase/client/Delete.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/Delete.java	(revision 1160468)
+++ src/main/java/org/apache/hadoop/hbase/client/Delete.java	(working copy)
@@ -34,7 +34,9 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
+import java.util.TreeSet;
 
 /**
  * Used to perform Delete operations on a single row.
@@ -68,7 +70,8 @@
  * deleteFamily -- then you need to use the method overrides that take a
  * timestamp.  The constructor timestamp is not referenced.
  */
-public class Delete implements Writable, Row, Comparable<Row> {
+public class Delete extends Operation
+  implements Writable, Row, Comparable<Row> {
   private static final byte DELETE_VERSION = (byte)3;
 
   private byte [] row = null;
@@ -340,39 +343,67 @@
   }
 
   /**
-   * @return string
+   * Compile the column family (i.e. schema) information
+   * into a Map. Useful for parsing and aggregation by debugging,
+   * logging, and administration tools.
+   * @return Map
    */
   @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("row=");
-    sb.append(Bytes.toStringBinary(this.row));
-    sb.append(", ts=");
-    sb.append(this.ts);
-    sb.append(", families={");
-    boolean moreThanOne = false;
+  public Map<String, Object> getFingerprint() {
+    Map<String, Object> map = new HashMap<String, Object>();
+    List<String> families = new ArrayList<String>();
+    // ideally, we would also include table information, but that information
+    // is not stored in each Operation instance.
+    map.put("families", families);
    for(Map.Entry<byte [], List<KeyValue>> entry : this.familyMap.entrySet()) {
-      if(moreThanOne) {
-        sb.append(", ");
-      } else {
-        moreThanOne = true;
+      families.add(Bytes.toStringBinary(entry.getKey()));
+    }
+    return map;
+  }
+
+  /**
+   * Compile the details beyond the scope of getFingerprint (row, columns,
+   * timestamps, etc.) into a Map along with the fingerprinted information.
+   * Useful for debugging, logging, and administration tools.
+   * @param maxCols a limit on the number of columns output prior to truncation
+   * @return Map
+   */
+  @Override
+  public Map<String, Object> toMap(int maxCols) {
+    // we start with the fingerprint map and build on top of it.
+    Map<String, Object> map = getFingerprint();
+    // replace the fingerprint's simple list of families with a
+    // map from column families to lists of qualifiers and kv details
+    Map<String, List<Map<String, Object>>> columns =
+      new HashMap<String, List<Map<String, Object>>>();
+    map.put("families", columns);
+    map.put("row", Bytes.toStringBinary(this.row));
+    map.put("ts", this.ts);
+    int colCount = 0;
+    // iterate through all column families affected by this Delete
+    for(Map.Entry<byte [], List<KeyValue>> entry : this.familyMap.entrySet()) {
+      // map from this family to details for each kv affected within the family
+      List<Map<String, Object>> qualifierDetails =
+        new ArrayList<Map<String, Object>>();
+      columns.put(Bytes.toStringBinary(entry.getKey()), qualifierDetails);
+      colCount += entry.getValue().size();
+      if (maxCols <= 0) {
+        continue;
       }
-      sb.append("(family=");
-      sb.append(Bytes.toString(entry.getKey()));
-      sb.append(", keyvalues=(");
-      boolean moreThanOneB = false;
+      // add details for each kv
       for(KeyValue kv : entry.getValue()) {
-        if(moreThanOneB) {
-          sb.append(", ");
-        } else {
-          moreThanOneB = true;
+        if (--maxCols <= 0) {
+          continue;
         }
-        sb.append(kv.toString());
+        Map<String, Object> kvMap = kv.toStringMap();
+        // row and family information are already available in the bigger map
+        kvMap.remove("row");
+        kvMap.remove("family");
+        qualifierDetails.add(kvMap);
       }
-      sb.append(")");
     }
-    sb.append("}");
-    return sb.toString();
+    map.put("totalColumns", colCount);
+    return map;
   }
 
   //Writable
Index: src/main/java/org/apache/hadoop/hbase/client/Put.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/Put.java	(revision 1160468)
+++ src/main/java/org/apache/hadoop/hbase/client/Put.java	(working copy)
@@ -37,7 +37,9 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
+import java.util.TreeSet;
 
 /**
@@ -47,7 +49,8 @@
  * for each column to be inserted, execute {@link #add(byte[], byte[], byte[]) add} or
  * {@link #add(byte[], byte[], long, byte[]) add} if setting the timestamp.
  */
-public class Put implements HeapSize, Writable, Row, Comparable<Row> {
+public class Put extends Operation
+  implements HeapSize, Writable, Row, Comparable<Row> {
   private static final byte PUT_VERSION = (byte)2;
 
   private byte [] row = null;
@@ -462,39 +465,68 @@
     return Collections.unmodifiableMap(attributes);
   }
 
   /**
-   * @return String
+   * Compile the column family (i.e. schema) information
+   * into a Map. Useful for parsing and aggregation by debugging,
+   * logging, and administration tools.
+   * @return Map
    */
   @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("row=");
-    sb.append(Bytes.toStringBinary(this.row));
-    sb.append(", families={");
-    boolean moreThanOne = false;
+  public Map<String, Object> getFingerprint() {
+    Map<String, Object> map = new HashMap<String, Object>();
+    List<String> families = new ArrayList<String>();
+    // ideally, we would also include table information, but that information
+    // is not stored in each Operation instance.
+    map.put("families", families);
    for(Map.Entry<byte [], List<KeyValue>> entry : this.familyMap.entrySet()) {
-      if(moreThanOne) {
-        sb.append(", ");
-      } else {
-        moreThanOne = true;
+      families.add(Bytes.toStringBinary(entry.getKey()));
+    }
+    return map;
+  }
+
+  /**
+   * Compile the details beyond the scope of getFingerprint (row, columns,
+   * timestamps, etc.) into a Map along with the fingerprinted information.
+   * Useful for debugging, logging, and administration tools.
+   * @param maxCols a limit on the number of columns output prior to truncation
+   * @return Map
+   */
+  @Override
+  public Map<String, Object> toMap(int maxCols) {
+    // we start with the fingerprint map and build on top of it.
+    Map<String, Object> map = getFingerprint();
+    // replace the fingerprint's simple list of families with a
+    // map from column families to lists of qualifiers and kv details
+    Map<String, List<Map<String, Object>>> columns =
+      new HashMap<String, List<Map<String, Object>>>();
+    map.put("families", columns);
+    map.put("row", Bytes.toStringBinary(this.row));
+    int colCount = 0;
+    // iterate through all column families affected by this Put
+    for(Map.Entry<byte [], List<KeyValue>> entry : this.familyMap.entrySet()) {
+      // map from this family to details for each kv affected within the family
+      List<Map<String, Object>> qualifierDetails =
+        new ArrayList<Map<String, Object>>();
+      columns.put(Bytes.toStringBinary(entry.getKey()), qualifierDetails);
+      colCount += entry.getValue().size();
+      if (maxCols <= 0) {
+        continue;
      }
-      sb.append("(family=");
-      sb.append(Bytes.toString(entry.getKey()));
-      sb.append(", keyvalues=(");
-      boolean moreThanOneB = false;
-      for(KeyValue kv : entry.getValue()) {
-        if(moreThanOneB) {
-          sb.append(", ");
-        } else {
-          moreThanOneB = true;
-        }
-        sb.append(kv.toString());
-      }
-      sb.append(")");
-    }
-    sb.append("}");
-    return sb.toString();
+      // add details for each kv
+      for(KeyValue kv : entry.getValue()) {
+        if (--maxCols <= 0) {
+          continue;
+        }
+        Map<String, Object> kvMap = kv.toStringMap();
+        // row and family information are already available in the bigger map
+        kvMap.remove("row");
+        kvMap.remove("family");
+        qualifierDetails.add(kvMap);
+      }
+    }
+    map.put("totalColumns", colCount);
+    return map;
   }
 
   public int compareTo(Row p) {
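The maxCols limit truncates only the per-family detail lists; totalColumns still reports the full count, so a log reader can tell that truncation happened. A sketch of that behavior (the demo class is ours, not part of the patch):

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TruncationDemo {
      public static void main(String[] args) {
        Put put = new Put(Bytes.toBytes("row"));
        for (int i = 0; i < 100; i++) {
          put.add(Bytes.toBytes("f"), Bytes.toBytes("q" + i), Bytes.toBytes("v"));
        }
        // the detail list is cut off after maxCols, but the total is preserved
        System.out.println(put.toMap(2).get("totalColumns"));  // prints 100
        System.out.println(put.toString(2));  // truncated, still valid JSON
      }
    }
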
Index: src/main/java/org/apache/hadoop/hbase/client/Get.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/Get.java	(revision 1160468)
+++ src/main/java/org/apache/hadoop/hbase/client/Get.java	(working copy)
@@ -31,8 +31,10 @@
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.Set;
@@ -63,7 +65,8 @@
  *
  * To add a filter, execute {@link #setFilter(Filter) setFilter}.
  */
-public class Get implements Writable, Row, Comparable<Row> {
+public class Get extends Operation
+  implements Writable, Row, Comparable<Row> {
   private static final byte GET_VERSION = (byte)2;
 
   private byte [] row = null;
@@ -352,55 +355,71 @@
   }
 
   /**
-   * @return String
+   * Compile the column family (i.e. schema) information
+   * into a Map. Useful for parsing and aggregation by debugging,
+   * logging, and administration tools.
+   * @return Map
    */
   @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("row=");
-    sb.append(Bytes.toStringBinary(this.row));
-    sb.append(", maxVersions=");
-    sb.append("").append(this.maxVersions);
-    sb.append(", cacheBlocks=");
-    sb.append(this.cacheBlocks);
-    sb.append(", timeRange=");
-    sb.append("[").append(this.tr.getMin()).append(",");
-    sb.append(this.tr.getMax()).append(")");
-    sb.append(", families=");
-    if(this.familyMap.size() == 0) {
-      sb.append("ALL");
-      return sb.toString();
+  public Map<String, Object> getFingerprint() {
+    Map<String, Object> map = new HashMap<String, Object>();
+    List<String> families = new ArrayList<String>();
+    map.put("families", families);
+    for(Map.Entry<byte [], NavigableSet<byte []>> entry :
+        this.familyMap.entrySet()) {
+      families.add(Bytes.toStringBinary(entry.getKey()));
     }
-    boolean moreThanOne = false;
+    return map;
+  }
+
+  /**
+   * Compile the details beyond the scope of getFingerprint (row, columns,
+   * timestamps, etc.) into a Map along with the fingerprinted information.
+   * Useful for debugging, logging, and administration tools.
+   * @param maxCols a limit on the number of columns output prior to truncation
+   * @return Map
+   */
+  @Override
+  public Map<String, Object> toMap(int maxCols) {
+    // we start with the fingerprint map and build on top of it.
+    Map<String, Object> map = getFingerprint();
+    // replace the fingerprint's simple list of families with a
+    // map from column families to lists of qualifiers
+    Map<String, List<String>> columns = new HashMap<String, List<String>>();
+    map.put("families", columns);
+    // add scalar information first
+    map.put("row", Bytes.toStringBinary(this.row));
+    map.put("maxVersions", this.maxVersions);
+    map.put("cacheBlocks", this.cacheBlocks);
+    List<Long> timeRange = new ArrayList<Long>();
+    timeRange.add(this.tr.getMin());
+    timeRange.add(this.tr.getMax());
+    map.put("timeRange", timeRange);
+    int colCount = 0;
+    // iterate through affected families and add details
    for(Map.Entry<byte [], NavigableSet<byte []>> entry : this.familyMap.entrySet()) {
-      if(moreThanOne) {
-        sb.append("), ");
-      } else {
-        moreThanOne = true;
-        sb.append("{");
-      }
-      sb.append("(family=");
-      sb.append(Bytes.toString(entry.getKey()));
-      sb.append(", columns=");
+      List<String> familyList = new ArrayList<String>();
+      columns.put(Bytes.toStringBinary(entry.getKey()), familyList);
       if(entry.getValue() == null) {
-        sb.append("ALL");
+        colCount++;
+        --maxCols;
+        familyList.add("ALL");
       } else {
-        sb.append("{");
-        boolean moreThanOneB = false;
+        colCount += entry.getValue().size();
+        if (maxCols <= 0) {
+          continue;
+        }
         for(byte [] column : entry.getValue()) {
-          if(moreThanOneB) {
-            sb.append(", ");
-          } else {
-            moreThanOneB = true;
+          if (--maxCols <= 0) {
+            continue;
           }
-          sb.append(Bytes.toStringBinary(column));
+          familyList.add(Bytes.toStringBinary(column));
         }
-        sb.append("}");
-      }
-    }
-    sb.append("}");
-    return sb.toString();
+      }
+    }
+    map.put("totalColumns", colCount);
+    return map;
   }
 
   //Row
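getFingerprint() and toMap() serve different audiences: the fingerprint is a stable, aggregatable identity (families only), while toMap() carries per-query detail. A short sketch of the difference (demo class is ours):

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FingerprintDemo {
      public static void main(String[] args) {
        Get get = new Get(Bytes.toBytes("aRow"));
        get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("col"));
        // fingerprint: schema-level identity only, e.g. {families=[cf]}
        System.out.println(get.getFingerprint());
        // full map: row, maxVersions, cacheBlocks, timeRange, columns...
        System.out.println(get.toMap());
      }
    }
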
Index: src/main/java/org/apache/hadoop/hbase/client/MultiPut.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/MultiPut.java	(revision 1160468)
+++ src/main/java/org/apache/hadoop/hbase/client/MultiPut.java	(working copy)
@@ -20,6 +20,7 @@
 
 package org.apache.hadoop.hbase.client;
 
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.Writable;
@@ -29,17 +30,23 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.TreeMap;
+import java.util.TreeSet;
 
 /**
  * @deprecated Use MultiAction instead
  * Data type class for putting multiple regions worth of puts in one RPC.
  */
-public class MultiPut implements Writable {
+public class MultiPut extends Operation implements Writable {
   public HServerAddress address; // client code ONLY
 
+  public static final int DEFAULT_MAX_PUT_OUTPUT = 10;
+
   // map of regions to lists of puts for that region.
   public Map<byte [], List<Put>> puts = new TreeMap<byte [], List<Put>>(Bytes.BYTES_COMPARATOR);
 
@@ -81,8 +88,118 @@
     return res;
   }
 
+  /**
+   * Compile the table and column family (i.e. schema) information
+   * into a Map. Useful for parsing and aggregation by debugging,
+   * logging, and administration tools.
+   * @return Map
+   */
+  @Override
+  public Map<String, Object> getFingerprint() {
+    Map<String, Object> map = new HashMap<String, Object>();
+    // for extensibility, we have a map of table information that we will
+    // populate with only family information for each table
+    Map<String, Map> tableInfo = new HashMap<String, Map>();
+    map.put("tables", tableInfo);
+    for (Map.Entry<byte [], List<Put>> entry : puts.entrySet()) {
+      // our fingerprint only concerns itself with which families are touched,
+      // not how many Puts touch them, so we use this Set to do just that.
+      Set<String> familySet;
+      try {
+        // since the puts are stored by region, we may have already
+        // recorded families for this region. if that is the case,
+        // we want to add to the existing Set. if not, we make a new Set.
+        String tableName = Bytes.toStringBinary(
+            HRegionInfo.parseRegionName(entry.getKey())[0]);
+        if (tableInfo.get(tableName) == null) {
+          Map<String, Object> table = new HashMap<String, Object>();
+          familySet = new TreeSet<String>();
+          table.put("families", familySet);
+          tableInfo.put(tableName, table);
+        } else {
+          familySet = (Set<String>) tableInfo.get(tableName).get("families");
+        }
+      } catch (IOException ioe) {
+        // in the case of parse error, default to labeling by region
+        Map<String, Object> table = new HashMap<String, Object>();
+        familySet = new TreeSet<String>();
+        table.put("families", familySet);
+        tableInfo.put(Bytes.toStringBinary(entry.getKey()), table);
+      }
+      // we now iterate through each Put and keep track of which families
+      // are affected in this table.
+      for (Put p : entry.getValue()) {
+        for (byte[] fam : p.getFamilyMap().keySet()) {
+          familySet.add(Bytes.toStringBinary(fam));
+        }
+      }
+    }
+    return map;
+  }
+
+  /**
+   * Compile the details beyond the scope of getFingerprint (mostly
+   * toMap from the Puts) into a Map along with the fingerprinted
+   * information. Useful for debugging, logging, and administration tools.
+   * @param maxCols a limit on the number of columns output prior to truncation
+   * @return Map
+   */
+  @Override
+  public Map<String, Object> toMap(int maxCols) {
+    Map<String, Object> map = getFingerprint();
+    Map<String, Map> tableInfo = (Map<String, Map>) map.get("tables");
+    int putCount = 0;
+    for (Map.Entry<byte [], List<Put>> entry : puts.entrySet()) {
+      // If the limit has been hit for put output, just adjust our counter
+      if (putCount >= DEFAULT_MAX_PUT_OUTPUT) {
+        putCount += entry.getValue().size();
+        continue;
+      }
+      List<Put> regionPuts = entry.getValue();
+      List<Map<String, Object>> putSummaries =
+        new ArrayList<Map<String, Object>>();
+      // find out how many of this region's puts we can add without busting
+      // the maximum
+      int regionPutsToAdd = regionPuts.size();
+      putCount += regionPutsToAdd;
+      if (putCount > DEFAULT_MAX_PUT_OUTPUT) {
+        regionPutsToAdd -= putCount - DEFAULT_MAX_PUT_OUTPUT;
+      }
+      for (Iterator<Put> iter = regionPuts.iterator(); regionPutsToAdd-- > 0;) {
+        putSummaries.add(iter.next().toMap(maxCols));
+      }
+      // attempt to extract the table name from the region name
+      String tableName = "";
+      try {
+        tableName = Bytes.toStringBinary(
+            HRegionInfo.parseRegionName(entry.getKey())[0]);
+      } catch (IOException ioe) {
+        // in the case of parse error, default to labeling by region
+        tableName = Bytes.toStringBinary(entry.getKey());
+      }
+      // since the puts are stored by region, we may have already
+      // recorded puts for this region. if that is the case,
+      // we want to add to the existing List. if not, we place a new list
+      // in the map
+      Map<String, Object> table =
+        (Map<String, Object>) tableInfo.get(tableName);
+      if (table == null) {
+        // in case the Put has changed since getFingerprint's map was built
+        table = new HashMap<String, Object>();
+        tableInfo.put(tableName, table);
+        table.put("puts", putSummaries);
+      } else if (table.get("puts") == null) {
+        table.put("puts", putSummaries);
+      } else {
+        ((List<Map<String, Object>>) table.get("puts")).addAll(putSummaries);
+      }
+    }
+    map.put("totalPuts", putCount);
+    return map;
+  }
+
+  @Override
   public void write(DataOutput out) throws IOException {
     out.writeInt(puts.size());
     for( Map.Entry<byte[], List<Put>> e : puts.entrySet()) {
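MultiPut keys its puts by region name, so both methods above lean on HRegionInfo.parseRegionName() to recover the table, falling back to labeling by raw region name when parsing fails. A sketch of that call (the region name below is fabricated for illustration):

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionNameDemo {
      public static void main(String[] args) throws Exception {
        // region names follow <table>,<startKey>,<regionId>
        byte[] regionName = Bytes.toBytes("myTable,startKey,1234567890");
        byte[][] parts = HRegionInfo.parseRegionName(regionName);
        System.out.println("table = " + Bytes.toStringBinary(parts[0]));
      }
    }
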
Index: src/main/java/org/apache/hadoop/hbase/client/Scan.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/Scan.java	(revision 1160468)
+++ src/main/java/org/apache/hadoop/hbase/client/Scan.java	(working copy)
@@ -34,8 +34,10 @@
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.TreeMap;
@@ -81,7 +83,7 @@
  * Expert: To explicitly disable server-side block caching for this scan,
  * execute {@link #setCacheBlocks(boolean)}.
  */
-public class Scan implements Writable {
+public class Scan extends Operation implements Writable {
   private static final byte SCAN_VERSION = (byte)2;
   private byte [] startRow = HConstants.EMPTY_START_ROW;
   private byte [] stopRow  = HConstants.EMPTY_END_ROW;
@@ -494,60 +496,78 @@
   }
 
   /**
-   * @return String
+   * Compile the column family (i.e. schema) information
+   * into a Map. Useful for parsing and aggregation by debugging,
+   * logging, and administration tools.
+   * @return Map
    */
   @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("startRow=");
-    sb.append(Bytes.toStringBinary(this.startRow));
-    sb.append(", stopRow=");
-    sb.append(Bytes.toStringBinary(this.stopRow));
-    sb.append(", maxVersions=");
-    sb.append(this.maxVersions);
-    sb.append(", batch=");
-    sb.append(this.batch);
-    sb.append(", caching=");
-    sb.append(this.caching);
-    sb.append(", cacheBlocks=");
-    sb.append(this.cacheBlocks);
-    sb.append(", timeRange=");
-    sb.append("[").append(this.tr.getMin()).append(",");
-    sb.append(this.tr.getMax()).append(")");
-    sb.append(", families=");
+  public Map<String, Object> getFingerprint() {
+    Map<String, Object> map = new HashMap<String, Object>();
+    List<String> families = new ArrayList<String>();
     if(this.familyMap.size() == 0) {
-      sb.append("ALL");
-      return sb.toString();
+      map.put("families", "ALL");
+      return map;
+    } else {
+      map.put("families", families);
     }
-    boolean moreThanOne = false;
-    for(Map.Entry<byte [], NavigableSet<byte []>> entry : this.familyMap.entrySet()) {
-      if(moreThanOne) {
-        sb.append("), ");
-      } else {
-        moreThanOne = true;
-        sb.append("{");
-      }
-      sb.append("(family=");
-      sb.append(Bytes.toStringBinary(entry.getKey()));
-      sb.append(", columns=");
+    for(Map.Entry<byte [], NavigableSet<byte []>> entry : this.familyMap.entrySet()) {
+      families.add(Bytes.toStringBinary(entry.getKey()));
+    }
+    return map;
+  }
+
+  /**
+   * Compile the details beyond the scope of getFingerprint (row, columns,
+   * timestamps, etc.) into a Map along with the fingerprinted information.
+   * Useful for debugging, logging, and administration tools.
+   * @param maxCols a limit on the number of columns output prior to truncation
+   * @return Map
+   */
+  @Override
+  public Map<String, Object> toMap(int maxCols) {
+    // start with the fingerprint map and build on top of it
+    Map<String, Object> map = getFingerprint();
+    // map from families to column list replaces fingerprint's list of families
+    Map<String, List<String>> familyColumns =
+      new HashMap<String, List<String>>();
+    map.put("families", familyColumns);
+    // add scalar information first
+    map.put("startRow", Bytes.toStringBinary(this.startRow));
+    map.put("stopRow", Bytes.toStringBinary(this.stopRow));
+    map.put("maxVersions", this.maxVersions);
+    map.put("batch", this.batch);
+    map.put("caching", this.caching);
+    map.put("cacheBlocks", this.cacheBlocks);
+    List<Long> timeRange = new ArrayList<Long>();
+    timeRange.add(this.tr.getMin());
+    timeRange.add(this.tr.getMax());
+    map.put("timeRange", timeRange);
+    int colCount = 0;
+    // iterate through affected families and list out up to maxCols columns
+    for(Map.Entry<byte [], NavigableSet<byte []>> entry :
+        this.familyMap.entrySet()) {
+      List<String> columns = new ArrayList<String>();
+      familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
       if(entry.getValue() == null) {
-        sb.append("ALL");
+        colCount++;
+        --maxCols;
+        columns.add("ALL");
       } else {
-        sb.append("{");
-        boolean moreThanOneB = false;
+        colCount += entry.getValue().size();
+        if (maxCols <= 0) {
+          continue;
+        }
         for(byte [] column : entry.getValue()) {
-          if(moreThanOneB) {
-            sb.append(", ");
-          } else {
-            moreThanOneB = true;
+          if (--maxCols <= 0) {
+            continue;
          }
-          sb.append(Bytes.toStringBinary(column));
+          columns.add(Bytes.toStringBinary(column));
        }
-        sb.append("}");
-      }
-    }
-    sb.append("}");
-    return sb.toString();
+      }
+    }
+    map.put("totalColumns", colCount);
+    return map;
   }
 
   @SuppressWarnings("unchecked")
Index: src/main/java/org/apache/hadoop/hbase/client/MultiAction.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/MultiAction.java	(revision 1160468)
+++ src/main/java/org/apache/hadoop/hbase/client/MultiAction.java	(working copy)
@@ -22,14 +22,13 @@
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.hbase.io.HbaseObjectWritable;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.HServerAddress;
 
 import java.io.DataOutput;
 import java.io.IOException;
 import java.io.DataInput;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
-import java.util.ArrayList;
 import java.util.Set;
 import java.util.TreeMap;
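End-to-end, the payload that logResponse() writes after the "(operationTooSlow): " or "(operationTooLarge): " tag is plain JSON, so monitoring scripts can split on the tag and hand the remainder to any JSON parser. A sketch of such a consumer; the log line below is fabricated, using only keys that the patch emits:

    import java.util.Map;

    import org.codehaus.jackson.map.ObjectMapper;

    public class SlowLogParserDemo {
      public static void main(String[] args) throws Exception {
        String logged = "(operationTooSlow): {\"starttimemillis\":1316000000000,"
            + "\"processingtimemillis\":3044,\"method\":\"get\"}";
        // everything after the tag is valid JSON
        String json = logged.substring(logged.indexOf(": ") + 2);
        Map parsed = new ObjectMapper().readValue(json, Map.class);
        System.out.println("method = " + parsed.get("method"));
      }
    }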