Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java =================================================================== --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java (revision 1518379) +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java (working copy) @@ -48,17 +48,16 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Chore; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; @@ -73,26 +72,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos - .ListNamespaceDescriptorsResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos - .ListNamespaceDescriptorsRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos - .ListTableDescriptorsByNamespaceResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos - .ListTableDescriptorsByNamespaceRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos - .ListTableNamesByNamespaceResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos - .ListTableNamesByNamespaceRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest; @@ -101,10 +80,14 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse; +import 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest; @@ -117,17 +100,27 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListTableDescriptorsByNamespaceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListTableDescriptorsByNamespaceResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListTableNamesByNamespaceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListTableNamesByNamespaceResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse; import 
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest; Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java =================================================================== --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java (revision 1518379) +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java (working copy) @@ -22,6 +22,7 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionLocation; @@ -42,12 +43,14 @@ */ class MultiServerCallable extends RegionServerCallable { private final MultiAction multi; + private final boolean cellBlock; MultiServerCallable(final HConnection connection, final TableName tableName, final HRegionLocation location, final MultiAction multi) { super(connection, tableName, null); this.multi = multi; setLocation(location); + this.cellBlock = isCellBlock(); } MultiAction getMulti() { @@ -67,16 +70,21 @@ // Row Mutations are a set of Puts and/or Deletes all to be applied atomically // on the one row. We do these a row at a time. if (row instanceof RowMutations) { + RowMutations rms = (RowMutations)row; + List cells = null; + MultiRequest multiRequest; try { - RowMutations rms = (RowMutations)row; - // Stick all Cells for all RowMutations in here into 'cells'. Populated when we call - // buildNoDataMultiRequest in the below. - List cells = new ArrayList(rms.getMutations().size()); - // Build a multi request absent its Cell payload (this is the 'nodata' in the below). - MultiRequest multiRequest = - RequestConverter.buildNoDataMultiRequest(regionName, rms, cells); - // Carry the cells over the proxy/pb Service interface using the payload carrying - // rpc controller. + if (this.cellBlock) { + // Stick all Cells for all RowMutations in here into 'cells'. Populated when we call + // buildNoDataMultiRequest in the below. + cells = new ArrayList(rms.getMutations().size()); + // Build a multi request absent its Cell payload (this is the 'nodata' in the below). + multiRequest = RequestConverter.buildNoDataMultiRequest(regionName, rms, cells); + } else { + multiRequest = RequestConverter.buildMultiRequest(regionName, rms); + } + // Carry the cells if any over the proxy/pb Service interface using the payload + // carrying rpc controller. getStub().multi(new PayloadCarryingRpcController(cells), multiRequest); // This multi call does not return results. response.add(regionName, action.getOriginalIndex(), Result.EMPTY_RESULT); @@ -91,14 +99,17 @@ if (actions.size() > rowMutations) { Exception ex = null; List results = null; - // Stick all Cells for the multiRequest in here into 'cells'. Gets filled in when we - // call buildNoDataMultiRequest - List cells = new ArrayList(actions.size() - rowMutations); + List cells = null; + MultiRequest multiRequest; try { - // The call to buildNoDataMultiRequest will skip RowMutations. They have - // already been handled above. - MultiRequest multiRequest = - RequestConverter.buildNoDataMultiRequest(regionName, actions, cells); + if (isCellBlock()) { + // Send data in cellblocks. The call to buildNoDataMultiRequest will skip RowMutations. + // They have already been handled above. 
+ cells = new ArrayList(actions.size() - rowMutations); + multiRequest = RequestConverter.buildNoDataMultiRequest(regionName, actions, cells); + } else { + multiRequest = RequestConverter.buildMultiRequest(regionName, actions); + } // Controller optionally carries cell data over the proxy/service boundary and also // optionally ferries cell response data back out again. PayloadCarryingRpcController controller = new PayloadCarryingRpcController(cells); @@ -116,9 +127,25 @@ return response; } + + /** + * @return True if we should send data in cellblocks. This is an expensive call. Cache the + * result if you can rather than call each time. + */ + private boolean isCellBlock() { + // This is not exact -- the configuration could have changed on us after connection was set up + // but it will do for now. + HConnection connection = getConnection(); + if (connection == null) return true; // Default is to do cellblocks. + Configuration configuration = connection.getConfiguration(); + if (configuration == null) return true; + String codec = configuration.get("hbase.client.rpc.codec", ""); + return codec != null && codec.length() > 0; + } + @Override public void prepare(boolean reload) throws IOException { // Use the location we were given in the constructor rather than go look it up. setStub(getConnection().getClient(getLocation().getServerName())); } -} +} \ No newline at end of file Index: hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java =================================================================== --- hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java (revision 1518379) +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java (working copy) @@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.io.ByteBufferOutputStream; import org.apache.hadoop.hbase.io.HeapSize; @@ -71,6 +72,11 @@ } /** + * Thrown if a cellscanner but no codec to encode it with. + */ + public static class CellScannerButNoCodecException extends HBaseIOException {}; + + /** * Puts CellScanner Cells into a cell block using passed in codec and/or * compressor. * @param codec @@ -86,6 +92,7 @@ final CellScanner cellScanner) throws IOException { if (cellScanner == null) return null; + if (codec == null) throw new CellScannerButNoCodecException(); int bufferSize = this.cellBlockBuildingInitialBufferSize; if (cellScanner instanceof HeapSize) { long longSize = ((HeapSize)cellScanner).heapSize(); Index: hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/PayloadCarryingRpcController.java =================================================================== --- hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/PayloadCarryingRpcController.java (revision 1518379) +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/PayloadCarryingRpcController.java (working copy) @@ -55,7 +55,7 @@ } public PayloadCarryingRpcController(final List cellIterables) { - this.cellScanner = CellUtil.createCellScanner(cellIterables); + this.cellScanner = cellIterables == null? 
null: CellUtil.createCellScanner(cellIterables); } /** Index: hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java =================================================================== --- hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java (revision 1518379) +++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java (working copy) @@ -89,6 +89,7 @@ import org.cloudera.htrace.Span; import org.cloudera.htrace.Trace; +import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.BlockingRpcChannel; import com.google.protobuf.Descriptors.MethodDescriptor; import com.google.protobuf.Message; @@ -426,7 +427,9 @@ if ((userInfoPB = getUserInfo(ticket)) != null) { builder.setUserInfo(userInfoPB); } - builder.setCellBlockCodecClass(this.codec.getClass().getCanonicalName()); + if (this.codec != null) { + builder.setCellBlockCodecClass(this.codec.getClass().getCanonicalName()); + } if (this.compressor != null) { builder.setCellBlockCompressorClass(this.compressor.getClass().getCanonicalName()); } @@ -1249,7 +1252,7 @@ this.pingInterval = getPingInterval(conf); this.ipcUtil = new IPCUtil(conf); this.conf = conf; - this.codec = getCodec(conf); + this.codec = getCodec(); this.compressor = getCompressor(conf); this.socketFactory = factory; this.clusterId = clusterId != null ? clusterId : HConstants.CLUSTER_ID_DEFAULT; @@ -1291,18 +1294,28 @@ /** * Encapsulate the ugly casting and RuntimeException conversion in private method. - * @param conf * @return Codec to use on this client. */ - private static Codec getCodec(final Configuration conf) { - String className = conf.get("hbase.client.rpc.codec", KeyValueCodec.class.getCanonicalName()); + Codec getCodec() { + // For NO CODEC, "hbase.client.rpc.codec" must be the empty string AND + // "hbase.client.default.rpc.codec" -- because default is to do cell block encoding. + String className = conf.get("hbase.client.rpc.codec", getDefaultCodec(this.conf)); + if (className == null || className.length() == 0) return null; try { - return (Codec)Class.forName(className).newInstance(); + return (Codec)Class.forName(className).newInstance(); } catch (Exception e) { throw new RuntimeException("Failed getting codec " + className, e); } } + @VisibleForTesting + public static String getDefaultCodec(final Configuration c) { + // If "hbase.client.default.rpc.codec" is empty string -- you can't set it to null because + // Configuration will complain -- then no default codec (and we'll pb everything). Else + // default is KeyValueCodec + return c.get("hbase.client.default.rpc.codec", KeyValueCodec.class.getCanonicalName()); + } + /** * Encapsulate the ugly casting and RuntimeException conversion in private method. 
* @param conf Index: hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java =================================================================== --- hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java (revision 1518379) +++ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java (working copy) @@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ActionResult; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse; @@ -277,36 +276,44 @@ */ public static Result[] getResults(CellScanner cellScanner, ScanResponse response) throws IOException { - if (response == null || cellScanner == null) return null; - ResultCellMeta resultCellMeta = response.getResultCellMeta(); - if (resultCellMeta == null) return null; - int noOfResults = resultCellMeta.getCellsLengthCount(); + if (response == null) return null; + // If cellscanner, then the number of Results to return is the count of elements in the + // cellsPerResult list. Otherwise, it is how many results are embedded inside the response. + int noOfResults = cellScanner != null? + response.getCellsPerResultCount(): response.getResultsCount(); Result[] results = new Result[noOfResults]; for (int i = 0; i < noOfResults; i++) { - int noOfCells = resultCellMeta.getCellsLength(i); - List cells = new ArrayList(noOfCells); - for (int j = 0; j < noOfCells; j++) { - try { - if (cellScanner.advance() == false) { - // We are not able to retrieve the exact number of cells which ResultCellMeta says us. + if (cellScanner != null) { + // Cells are out in cellblocks. Group them up again as Results. How many to read at a + // time will be found in getCellsLength -- length here is how many Cells in the i'th Result + int noOfCells = response.getCellsPerResult(i); + List cells = new ArrayList(noOfCells); + for (int j = 0; j < noOfCells; j++) { + try { + if (cellScanner.advance() == false) { + // We are not able to retrieve the exact number of cells which ResultCellMeta says us. + // We have to scan for the same results again. Throwing DNRIOE as a client retry on the + // same scanner will result in OutOfOrderScannerNextException + String msg = "Results sent from server=" + noOfResults + ". But only got " + i + + " results completely at client. Resetting the scanner to scan again."; + LOG.error(msg); + throw new DoNotRetryIOException(msg); + } + } catch (IOException ioe) { + // We are getting IOE while retrieving the cells for Results. // We have to scan for the same results again. Throwing DNRIOE as a client retry on the // same scanner will result in OutOfOrderScannerNextException - String msg = "Results sent from server=" + noOfResults + ". But only got " + i - + " results completely at client. Resetting the scanner to scan again."; - LOG.error(msg); - throw new DoNotRetryIOException(msg); + LOG.error("Exception while reading cells from result." + + "Resetting the scanner to scan again.", ioe); + throw new DoNotRetryIOException("Resetting the scanner.", ioe); } - } catch (IOException ioe) { - // We are getting IOE while retrieving the cells for Results. 
- // We have to scan for the same results again. Throwing DNRIOE as a client retry on the - // same scanner will result in OutOfOrderScannerNextException - LOG.error("Exception while reading cells from result." - + "Resetting the scanner to scan again.", ioe); - throw new DoNotRetryIOException("Resetting the scanner.", ioe); + cells.add(cellScanner.current()); } - cells.add(cellScanner.current()); + results[i] = new Result(cells); + } else { + // Result is pure pb. + results[i] = ProtobufUtil.toResult(response.getResults(i)); } - results[i] = new Result(cells); } return results; } Index: hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java =================================================================== --- hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java (revision 1518379) +++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java (working copy) @@ -26,6 +26,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -40,6 +41,7 @@ import org.junit.Before; import org.junit.Test; import org.junit.Ignore; +import org.junit.experimental.categories.Category; import org.mockito.Mockito; import com.google.protobuf.RpcController; @@ -49,6 +51,7 @@ * Test client behavior w/o setting up a cluster. * Mock up cluster emissions. */ +@Category(SmallTests.class) public class TestClientNoCluster { private static final Log LOG = LogFactory.getLog(TestClientNoCluster.class); private Configuration conf; Index: hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java =================================================================== --- hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java (revision 1518379) +++ hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java (working copy) @@ -40,8 +40,8 @@ * Typical usage: * *
- * while (scanner.next()) {
- *   Cell cell = scanner.get();
+ * while (scanner.advance()) {
+ *   Cell cell = scanner.current();
  *   // do something
  * }
  * 
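[Editor's note, illustration only, not part of the patch] The CellScanner hunk above renames the iteration methods in the usage example (next()/get() become advance()/current()), and the CellUtil hunk below makes createCellScanner() return null for null input so a request carrying no cell payload can hand a null CellScanner to PayloadCarryingRpcController. Below is a minimal, self-contained sketch of the resulting client-side idiom, including the per-Result regrouping that ResponseConverter.getResults() performs from the cells_per_result counts; the class name and sample data are hypothetical:

  import java.io.IOException;
  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.CellScanner;
  import org.apache.hadoop.hbase.CellUtil;
  import org.apache.hadoop.hbase.KeyValue;

  public class CellScannerSketch {
    public static void main(String[] args) throws IOException {
      byte[] f = "f".getBytes();
      byte[] q = "q".getBytes();
      byte[] v = "v".getBytes();
      // Three Cells standing in for a decoded cellblock. With this patch a null
      // iterable yields a null scanner instead of an NPE.
      CellScanner scanner = CellUtil.createCellScanner(Arrays.<Cell>asList(
          new KeyValue("r1".getBytes(), f, q, v),
          new KeyValue("r1".getBytes(), f, "q2".getBytes(), v),
          new KeyValue("r2".getBytes(), f, q, v)));
      // cells_per_result = {2, 1}: the first Result holds two Cells, the second one.
      int[] cellsPerResult = {2, 1};
      for (int count : cellsPerResult) {
        List<Cell> oneResult = new ArrayList<Cell>(count);
        for (int i = 0; i < count; i++) {
          if (!scanner.advance()) {          // formerly next()
            throw new IOException("Cellblock ran out before all Results were read");
          }
          oneResult.add(scanner.current());  // formerly get()
        }
        // A real client would wrap oneResult in a Result here.
        System.out.println("Result with " + oneResult.size() + " cell(s)");
      }
    }
  }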
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java =================================================================== --- hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java (revision 1518379) +++ hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java (working copy) @@ -161,14 +161,17 @@ * @return CellScanner interface over cellIterable */ public static CellScanner createCellScanner(final Iterable cellIterable) { + if (cellIterable == null) return null; return createCellScanner(cellIterable.iterator()); } /** * @param cells - * @return CellScanner interface over cellIterable + * @return CellScanner interface over cellIterable or null if cells is + * null */ public static CellScanner createCellScanner(final Iterator cells) { + if (cells == null) return null; return new CellScanner() { private final Iterator iterator = cells; private Cell current = null; Index: hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java =================================================================== --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java (revision 1518379) +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java (working copy) @@ -17481,19 +17481,49 @@ public interface ScanResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { - // optional .ResultCellMeta result_cell_meta = 1; + // repeated uint32 cells_per_result = 1; /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_per_result = 1; + * + *
+     * This field is filled in if we are doing cellblocks.  A cellblock is made up
+     * of all Cells serialized out as one cellblock BUT responses from a server
+     * have their Cells grouped by Result.  So that we can reconstitute the
+     * Results on the client side, this field is a list of counts of Cells
+     * in each Result that makes up the response.  For example, if this field
+     * has 3, 3, 3 in it, then we know that on the client, we are to make
+     * three Results of three Cells each.
+     * 
*/ - boolean hasResultCellMeta(); + java.util.List getCellsPerResultList(); /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_per_result = 1; + * + *
+     * This field is filled in if we are doing cellblocks.  A cellblock is made up
+     * of all Cells serialized out as one cellblock BUT responses from a server
+     * have their Cells grouped by Result.  So that we can reconstitute the
+     * Results on the client side, this field is a list of counts of Cells
+     * in each Result that makes up the response.  For example, if this field
+     * has 3, 3, 3 in it, then we know that on the client, we are to make
+     * three Results of three Cells each.
+     * 
*/ - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta getResultCellMeta(); + int getCellsPerResultCount(); /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_per_result = 1; + * + *
+     * This field is filled in if we are doing cellblocks.  A cellblock is made up
+     * of all Cells serialized out as one cellblock BUT responses from a server
+     * have their Cells grouped by Result.  So that we can reconstitute the
+     * Results on the client side, this field is a list of counts of Cells
+     * in each Result that makes up the response.  For example, if this field
+     * has 3, 3, 3 in it, then we know that on the client, we are to make
+     * three Results of three Cells each.
+     * 
*/ - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMetaOrBuilder getResultCellMetaOrBuilder(); + int getCellsPerResult(int index); // optional uint64 scanner_id = 2; /** @@ -17524,6 +17554,61 @@ * optional uint32 ttl = 4; */ int getTtl(); + + // repeated .Result results = 5; + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * This field is mutually exclusive with cells_per_result (since the Cells will
+     * be inside the pb'd Result)
+     * 
+ */ + java.util.List + getResultsList(); + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * This field is mutually exclusive with cells_per_result (since the Cells will
+     * be inside the pb'd Result)
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result getResults(int index); + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * This field is mutually exclusive with cells_per_result (since the Cells will
+     * be inside the pb'd Result)
+     * 
+ */ + int getResultsCount(); + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * This field is mutually exclusive with cells_per_result (since the Cells will
+     * be inside the pb'd Result)
+     * 
+ */ + java.util.List + getResultsOrBuilderList(); + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * This field is mutually exclusive with cells_per_result (since the Cells will
+     * be inside the pb'd Result)
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder getResultsOrBuilder( + int index); } /** * Protobuf type {@code ScanResponse} @@ -17582,34 +17667,50 @@ } break; } + case 8: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + cellsPerResult_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + cellsPerResult_.add(input.readUInt32()); + break; + } case 10: { - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = resultCellMeta_.toBuilder(); + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) { + cellsPerResult_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; } - resultCellMeta_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(resultCellMeta_); - resultCellMeta_ = subBuilder.buildPartial(); + while (input.getBytesUntilLimit() > 0) { + cellsPerResult_.add(input.readUInt32()); } - bitField0_ |= 0x00000001; + input.popLimit(limit); break; } case 16: { - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000001; scannerId_ = input.readUInt64(); break; } case 24: { - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000002; moreResults_ = input.readBool(); break; } case 32: { - bitField0_ |= 0x00000008; + bitField0_ |= 0x00000004; ttl_ = input.readUInt32(); break; } + case 42: { + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + results_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000010; + } + results_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.PARSER, extensionRegistry)); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -17618,6 +17719,12 @@ throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + cellsPerResult_ = java.util.Collections.unmodifiableList(cellsPerResult_); + } + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + results_ = java.util.Collections.unmodifiableList(results_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -17650,26 +17757,57 @@ } private int bitField0_; - // optional .ResultCellMeta result_cell_meta = 1; - public static final int RESULT_CELL_META_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta resultCellMeta_; + // repeated uint32 cells_per_result = 1; + public static final int CELLS_PER_RESULT_FIELD_NUMBER = 1; + private java.util.List cellsPerResult_; /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_per_result = 1; + * + *
+     * This field is filled in if we are doing cellblocks.  A cellblock is made up
+     * of all Cells serialized out as one cellblock BUT responses from a server
+     * have their Cells grouped by Result.  So that we can reconstitute the
+     * Results on the client side, this field is a list of counts of Cells
+     * in each Result that makes up the response.  For example, if this field
+     * has 3, 3, 3 in it, then we know that on the client, we are to make
+     * three Results of three Cells each.
+     * 
*/ - public boolean hasResultCellMeta() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List + getCellsPerResultList() { + return cellsPerResult_; } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_per_result = 1; + * + *
+     * This field is filled in if we are doing cellblocks.  A cellblock is made up
+     * of all Cells serialized out as one cellblock BUT responses from a server
+     * have their Cells grouped by Result.  So that we can reconstitute the
+     * Results on the client side, this field is a list of counts of Cells
+     * in each Result that makes up the response.  For example, if this field
+     * has 3, 3, 3 in it, then we know that on the client, we are to make
+     * three Results of three Cells each.
+     * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta getResultCellMeta() { - return resultCellMeta_; + public int getCellsPerResultCount() { + return cellsPerResult_.size(); } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_per_result = 1; + * + *
+     * This field is filled in if we are doing cellblocks.  A cellblock is made up
+     * of all Cells serialized out as one cellblock BUT responses from a server
+     * have their Cells grouped by Result.  So that we can reconstitute the
+     * Results on the client side, this field is a list of counts of Cells
+     * in each Result that makes up the response.  For example, if this field
+     * has 3, 3, 3 in it, then we know that on the client, we are to make
+     * three Results of three Cells each.
+     * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMetaOrBuilder getResultCellMetaOrBuilder() { - return resultCellMeta_; + public int getCellsPerResult(int index) { + return cellsPerResult_.get(index); } // optional uint64 scanner_id = 2; @@ -17679,7 +17817,7 @@ * optional uint64 scanner_id = 2; */ public boolean hasScannerId() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional uint64 scanner_id = 2; @@ -17695,7 +17833,7 @@ * optional bool more_results = 3; */ public boolean hasMoreResults() { - return ((bitField0_ & 0x00000004) == 0x00000004); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bool more_results = 3; @@ -17711,7 +17849,7 @@ * optional uint32 ttl = 4; */ public boolean hasTtl() { - return ((bitField0_ & 0x00000008) == 0x00000008); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional uint32 ttl = 4; @@ -17720,11 +17858,78 @@ return ttl_; } + // repeated .Result results = 5; + public static final int RESULTS_FIELD_NUMBER = 5; + private java.util.List results_; + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * This field is mutually exclusive with cells_per_result (since the Cells will
+     * be inside the pb'd Result)
+     * 
+ */ + public java.util.List getResultsList() { + return results_; + } + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * This field is mutually exclusive with cells_per_result (since the Cells will
+     * be inside the pb'd Result)
+     * 
+ */ + public java.util.List + getResultsOrBuilderList() { + return results_; + } + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * This field is mutually exclusive with cells_per_result (since the Cells will
+     * be inside the pb'd Result)
+     * 
+ */ + public int getResultsCount() { + return results_.size(); + } + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * This field is mutually exclusive with cells_per_result (since the Cells will
+     * be inside the pb'd Result)
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result getResults(int index) { + return results_.get(index); + } + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * This field is mutually exclusive with cells_per_result (since the Cells will
+     * be inside the pb'd Result)
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder getResultsOrBuilder( + int index) { + return results_.get(index); + } + private void initFields() { - resultCellMeta_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.getDefaultInstance(); + cellsPerResult_ = java.util.Collections.emptyList(); scannerId_ = 0L; moreResults_ = false; ttl_ = 0; + results_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -17738,18 +17943,21 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + for (int i = 0; i < cellsPerResult_.size(); i++) { + output.writeUInt32(1, cellsPerResult_.get(i)); + } if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, resultCellMeta_); + output.writeUInt64(2, scannerId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, scannerId_); + output.writeBool(3, moreResults_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBool(3, moreResults_); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeUInt32(4, ttl_); } + for (int i = 0; i < results_.size(); i++) { + output.writeMessage(5, results_.get(i)); + } getUnknownFields().writeTo(output); } @@ -17759,21 +17967,30 @@ if (size != -1) return size; size = 0; + { + int dataSize = 0; + for (int i = 0; i < cellsPerResult_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeUInt32SizeNoTag(cellsPerResult_.get(i)); + } + size += dataSize; + size += 1 * getCellsPerResultList().size(); + } if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, resultCellMeta_); + .computeUInt64Size(2, scannerId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, scannerId_); + .computeBoolSize(3, moreResults_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(3, moreResults_); + .computeUInt32Size(4, ttl_); } - if (((bitField0_ & 0x00000008) == 0x00000008)) { + for (int i = 0; i < results_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(4, ttl_); + .computeMessageSize(5, results_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -17798,11 +18015,8 @@ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse) obj; boolean result = true; - result = result && (hasResultCellMeta() == other.hasResultCellMeta()); - if (hasResultCellMeta()) { - result = result && getResultCellMeta() - .equals(other.getResultCellMeta()); - } + result = result && getCellsPerResultList() + .equals(other.getCellsPerResultList()); result = result && (hasScannerId() == other.hasScannerId()); if (hasScannerId()) { result = result && (getScannerId() @@ -17818,6 +18032,8 @@ result = result && (getTtl() == other.getTtl()); } + result = result && getResultsList() + .equals(other.getResultsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -17831,9 +18047,9 @@ } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasResultCellMeta()) { - hash = (37 * hash) + RESULT_CELL_META_FIELD_NUMBER; - hash = (53 * hash) + getResultCellMeta().hashCode(); + if 
(getCellsPerResultCount() > 0) { + hash = (37 * hash) + CELLS_PER_RESULT_FIELD_NUMBER; + hash = (53 * hash) + getCellsPerResultList().hashCode(); } if (hasScannerId()) { hash = (37 * hash) + SCANNER_ID_FIELD_NUMBER; @@ -17847,6 +18063,10 @@ hash = (37 * hash) + TTL_FIELD_NUMBER; hash = (53 * hash) + getTtl(); } + if (getResultsCount() > 0) { + hash = (37 * hash) + RESULTS_FIELD_NUMBER; + hash = (53 * hash) + getResultsList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -17954,7 +18174,7 @@ } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getResultCellMetaFieldBuilder(); + getResultsFieldBuilder(); } } private static Builder create() { @@ -17963,11 +18183,7 @@ public Builder clear() { super.clear(); - if (resultCellMetaBuilder_ == null) { - resultCellMeta_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.getDefaultInstance(); - } else { - resultCellMetaBuilder_.clear(); - } + cellsPerResult_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); scannerId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); @@ -17975,6 +18191,12 @@ bitField0_ = (bitField0_ & ~0x00000004); ttl_ = 0; bitField0_ = (bitField0_ & ~0x00000008); + if (resultsBuilder_ == null) { + results_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + } else { + resultsBuilder_.clear(); + } return this; } @@ -18003,26 +18225,32 @@ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + cellsPerResult_ = java.util.Collections.unmodifiableList(cellsPerResult_); + bitField0_ = (bitField0_ & ~0x00000001); } - if (resultCellMetaBuilder_ == null) { - result.resultCellMeta_ = resultCellMeta_; - } else { - result.resultCellMeta_ = resultCellMetaBuilder_.build(); - } + result.cellsPerResult_ = cellsPerResult_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; + to_bitField0_ |= 0x00000001; } result.scannerId_ = scannerId_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; + to_bitField0_ |= 0x00000002; } result.moreResults_ = moreResults_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; + to_bitField0_ |= 0x00000004; } result.ttl_ = ttl_; + if (resultsBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { + results_ = java.util.Collections.unmodifiableList(results_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.results_ = results_; + } else { + result.results_ = resultsBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -18039,8 +18267,15 @@ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse other) { if (other == org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse.getDefaultInstance()) return this; - if (other.hasResultCellMeta()) { - mergeResultCellMeta(other.getResultCellMeta()); + if (!other.cellsPerResult_.isEmpty()) { + if (cellsPerResult_.isEmpty()) { + cellsPerResult_ = other.cellsPerResult_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureCellsPerResultIsMutable(); + 
cellsPerResult_.addAll(other.cellsPerResult_); + } + onChanged(); } if (other.hasScannerId()) { setScannerId(other.getScannerId()); @@ -18051,6 +18286,32 @@ if (other.hasTtl()) { setTtl(other.getTtl()); } + if (resultsBuilder_ == null) { + if (!other.results_.isEmpty()) { + if (results_.isEmpty()) { + results_ = other.results_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureResultsIsMutable(); + results_.addAll(other.results_); + } + onChanged(); + } + } else { + if (!other.results_.isEmpty()) { + if (resultsBuilder_.isEmpty()) { + resultsBuilder_.dispose(); + resultsBuilder_ = null; + results_ = other.results_; + bitField0_ = (bitField0_ & ~0x00000010); + resultsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getResultsFieldBuilder() : null; + } else { + resultsBuilder_.addAllMessages(other.results_); + } + } + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -18078,122 +18339,141 @@ } private int bitField0_; - // optional .ResultCellMeta result_cell_meta = 1; - private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta resultCellMeta_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMetaOrBuilder> resultCellMetaBuilder_; + // repeated uint32 cells_per_result = 1; + private java.util.List cellsPerResult_ = java.util.Collections.emptyList(); + private void ensureCellsPerResultIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + cellsPerResult_ = new java.util.ArrayList(cellsPerResult_); + bitField0_ |= 0x00000001; + } + } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_per_result = 1; + * + *
+       * This field is filled in if we are doing cellblocks.  A cellblock is made up
+       * of all Cells serialized out as one cellblock BUT responses from a server
+       * have their Cells grouped by Result.  So that we can reconstitute the
+       * Results on the client side, this field is a list of counts of Cells
+       * in each Result that makes up the response.  For example, if this field
+       * has 3, 3, 3 in it, then we know that on the client, we are to make
+       * three Results of three Cells each.
+       * 
*/ - public boolean hasResultCellMeta() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List + getCellsPerResultList() { + return java.util.Collections.unmodifiableList(cellsPerResult_); } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_per_result = 1; + * + *
+       * This field is filled in if we are doing cellblocks.  A cellblock is made up
+       * of all Cells serialized out as one cellblock BUT responses from a server
+       * have their Cells grouped by Result.  So that we can reconstitute the
+       * Results on the client side, this field is a list of counts of Cells
+       * in each Result that makes up the response.  For example, if this field
+       * has 3, 3, 3 in it, then we know that on the client, we are to make
+       * three Results of three Cells each.
+       * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta getResultCellMeta() { - if (resultCellMetaBuilder_ == null) { - return resultCellMeta_; - } else { - return resultCellMetaBuilder_.getMessage(); - } + public int getCellsPerResultCount() { + return cellsPerResult_.size(); } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_per_result = 1; + * + *
+       * This field is filled in if we are doing cellblocks.  A cellblock is made up
+       * of all Cells serialized out as one cellblock BUT responses from a server
+       * have their Cells grouped by Result.  So that we can reconstitute the
+       * Results on the client side, this field is a list of counts of Cells
+       * in each Result that makes up the response.  For example, if this field
+       * has 3, 3, 3 in it, then we know that on the client, we are to make
+       * three Results of three Cells each.
+       * 
*/ - public Builder setResultCellMeta(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta value) { - if (resultCellMetaBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - resultCellMeta_ = value; - onChanged(); - } else { - resultCellMetaBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; + public int getCellsPerResult(int index) { + return cellsPerResult_.get(index); } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_per_result = 1; + * + *
+       * This field is filled in if we are doing cellblocks.  A cellblock is made up
+       * of all Cells serialized out as one cellblock BUT responses from a server
+       * have their Cells grouped by Result.  So that we can reconstitute the
+       * Results on the client side, this field is a list of counts of Cells
+       * in each Result that makes up the response.  For example, if this field
+       * has 3, 3, 3 in it, then we know that on the client, we are to make
+       * three Results of three Cells each.
+       * 
*/ - public Builder setResultCellMeta( - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder builderForValue) { - if (resultCellMetaBuilder_ == null) { - resultCellMeta_ = builderForValue.build(); - onChanged(); - } else { - resultCellMetaBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; + public Builder setCellsPerResult( + int index, int value) { + ensureCellsPerResultIsMutable(); + cellsPerResult_.set(index, value); + onChanged(); return this; } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_per_result = 1; + * + *
+       * This field is filled in if we are doing cellblocks.  A cellblock is made up
+       * of all Cells serialized out as one cellblock BUT responses from a server
+       * have their Cells grouped by Result.  So that we can reconstitute the
+       * Results on the client side, this field is a list of counts of Cells
+       * in each Result that makes up the response.  For example, if this field
+       * has 3, 3, 3 in it, then we know that on the client, we are to make
+       * three Results of three Cells each.
+       * 
*/ - public Builder mergeResultCellMeta(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta value) { - if (resultCellMetaBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - resultCellMeta_ != org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.getDefaultInstance()) { - resultCellMeta_ = - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.newBuilder(resultCellMeta_).mergeFrom(value).buildPartial(); - } else { - resultCellMeta_ = value; - } - onChanged(); - } else { - resultCellMetaBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; + public Builder addCellsPerResult(int value) { + ensureCellsPerResultIsMutable(); + cellsPerResult_.add(value); + onChanged(); return this; } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_per_result = 1; + * + *
+       * This field is filled in if we are doing cellblocks.  A cellblock is made up
+       * of all Cells serialized out as one cellblock BUT responses from a server
+       * have their Cells grouped by Result.  So that we can reconstitute the
+       * Results on the client side, this field is a list of counts of Cells
+       * in each Result that makes up the response.  For example, if this field
+       * has 3, 3, 3 in it, then we know that on the client, we are to make
+       * three Results of three Cells each.
+       * 
*/ - public Builder clearResultCellMeta() { - if (resultCellMetaBuilder_ == null) { - resultCellMeta_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.getDefaultInstance(); - onChanged(); - } else { - resultCellMetaBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); + public Builder addAllCellsPerResult( + java.lang.Iterable values) { + ensureCellsPerResultIsMutable(); + super.addAll(values, cellsPerResult_); + onChanged(); return this; } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_per_result = 1; + * + *
+       * This field is filled in if we are doing cellblocks.  A cellblock is made up
+       * of all Cells serialized out as one cellblock BUT responses from a server
+       * have their Cells grouped by Result.  So that we can reconstitute the
+       * Results on the client side, this field is a list of counts of Cells
+       * in each Result that makes up the response.  For example, if this field
+       * has 3, 3, 3 in it, then we know that on the client, we are to make
+       * three Results of three Cells each.
+       * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder getResultCellMetaBuilder() { - bitField0_ |= 0x00000001; + public Builder clearCellsPerResult() { + cellsPerResult_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); onChanged(); - return getResultCellMetaFieldBuilder().getBuilder(); + return this; } - /** - * optional .ResultCellMeta result_cell_meta = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMetaOrBuilder getResultCellMetaOrBuilder() { - if (resultCellMetaBuilder_ != null) { - return resultCellMetaBuilder_.getMessageOrBuilder(); - } else { - return resultCellMeta_; - } - } - /** - * optional .ResultCellMeta result_cell_meta = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMetaOrBuilder> - getResultCellMetaFieldBuilder() { - if (resultCellMetaBuilder_ == null) { - resultCellMetaBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMetaOrBuilder>( - resultCellMeta_, - getParentForChildren(), - isClean()); - resultCellMeta_ = null; - } - return resultCellMetaBuilder_; - } // optional uint64 scanner_id = 2; private long scannerId_ ; @@ -18294,519 +18574,363 @@ return this; } - // @@protoc_insertion_point(builder_scope:ScanResponse) - } + // repeated .Result results = 5; + private java.util.List results_ = + java.util.Collections.emptyList(); + private void ensureResultsIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + results_ = new java.util.ArrayList(results_); + bitField0_ |= 0x00000010; + } + } - static { - defaultInstance = new ScanResponse(true); - defaultInstance.initFields(); - } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder> resultsBuilder_; - // @@protoc_insertion_point(class_scope:ScanResponse) - } - - public interface ResultCellMetaOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated uint32 cells_length = 1; - /** - * repeated uint32 cells_length = 1; - */ - java.util.List getCellsLengthList(); - /** - * repeated uint32 cells_length = 1; - */ - int getCellsLengthCount(); - /** - * repeated uint32 cells_length = 1; - */ - int getCellsLength(int index); - } - /** - * Protobuf type {@code ResultCellMeta} - */ - public static final class ResultCellMeta extends - com.google.protobuf.GeneratedMessage - implements ResultCellMetaOrBuilder { - // Use ResultCellMeta.newBuilder() to construct. 
- private ResultCellMeta(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private ResultCellMeta(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final ResultCellMeta defaultInstance; - public static ResultCellMeta getDefaultInstance() { - return defaultInstance; - } - - public ResultCellMeta getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ResultCellMeta( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - cellsLength_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - cellsLength_.add(input.readUInt32()); - break; - } - case 10: { - int length = input.readRawVarint32(); - int limit = input.pushLimit(length); - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) { - cellsLength_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - while (input.getBytesUntilLimit() > 0) { - cellsLength_.add(input.readUInt32()); - } - input.popLimit(limit); - break; - } - } + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
+ */ + public java.util.List getResultsList() { + if (resultsBuilder_ == null) { + return java.util.Collections.unmodifiableList(results_); + } else { + return resultsBuilder_.getMessageList(); } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - cellsLength_ = java.util.Collections.unmodifiableList(cellsLength_); + } + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
+ */ + public int getResultsCount() { + if (resultsBuilder_ == null) { + return results_.size(); + } else { + return resultsBuilder_.getCount(); } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_ResultCellMeta_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_ResultCellMeta_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ResultCellMeta parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ResultCellMeta(input, extensionRegistry); + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result getResults(int index) { + if (resultsBuilder_ == null) { + return results_.get(index); + } else { + return resultsBuilder_.getMessage(index); + } } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - // repeated uint32 cells_length = 1; - public static final int CELLS_LENGTH_FIELD_NUMBER = 1; - private java.util.List cellsLength_; - /** - * repeated uint32 cells_length = 1; - */ - public java.util.List - getCellsLengthList() { - return cellsLength_; - } - /** - * repeated uint32 cells_length = 1; - */ - public int getCellsLengthCount() { - return cellsLength_.size(); - } - /** - * repeated uint32 cells_length = 1; - */ - public int getCellsLength(int index) { - return cellsLength_.get(index); - } - - private void initFields() { - cellsLength_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < cellsLength_.size(); i++) { - output.writeUInt32(1, cellsLength_.get(i)); + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
+ */ + public Builder setResults( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result value) { + if (resultsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureResultsIsMutable(); + results_.set(index, value); + onChanged(); + } else { + resultsBuilder_.setMessage(index, value); + } + return this; } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - { - int dataSize = 0; - for (int i = 0; i < cellsLength_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeUInt32SizeNoTag(cellsLength_.get(i)); + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
+ */ + public Builder setResults( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder builderForValue) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + results_.set(index, builderForValue.build()); + onChanged(); + } else { + resultsBuilder_.setMessage(index, builderForValue.build()); } - size += dataSize; - size += 1 * getCellsLengthList().size(); + return this; } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta) obj; - - boolean result = true; - result = result && getCellsLengthList() - .equals(other.getCellsLengthList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getCellsLengthCount() > 0) { - hash = (37 * hash) + CELLS_LENGTH_FIELD_NUMBER; - hash = (53 * hash) + getCellsLengthList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code ResultCellMeta} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMetaOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_ResultCellMeta_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_ResultCellMeta_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
+ */ + public Builder addResults(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result value) { + if (resultsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureResultsIsMutable(); + results_.add(value); + onChanged(); + } else { + resultsBuilder_.addMessage(value); } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - cellsLength_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); return this; } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_ResultCellMeta_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta build() { - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
+ */ + public Builder addResults( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result value) { + if (resultsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureResultsIsMutable(); + results_.add(index, value); + onChanged(); + } else { + resultsBuilder_.addMessage(index, value); } - return result; + return this; } - - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta(this); - int from_bitField0_ = bitField0_; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - cellsLength_ = java.util.Collections.unmodifiableList(cellsLength_); - bitField0_ = (bitField0_ & ~0x00000001); + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
+ */ + public Builder addResults( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder builderForValue) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + results_.add(builderForValue.build()); + onChanged(); + } else { + resultsBuilder_.addMessage(builderForValue.build()); } - result.cellsLength_ = cellsLength_; - onBuilt(); - return result; + return this; } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta)other); + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
+ */ + public Builder addResults( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder builderForValue) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + results_.add(index, builderForValue.build()); + onChanged(); } else { - super.mergeFrom(other); - return this; + resultsBuilder_.addMessage(index, builderForValue.build()); } + return this; } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.getDefaultInstance()) return this; - if (!other.cellsLength_.isEmpty()) { - if (cellsLength_.isEmpty()) { - cellsLength_ = other.cellsLength_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureCellsLengthIsMutable(); - cellsLength_.addAll(other.cellsLength_); - } + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
+ */ + public Builder addAllResults( + java.lang.Iterable values) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + super.addAll(values, results_); onChanged(); + } else { + resultsBuilder_.addAllMessages(values); } - this.mergeUnknownFields(other.getUnknownFields()); return this; } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
+ */ + public Builder clearResults() { + if (resultsBuilder_ == null) { + results_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + resultsBuilder_.clear(); } return this; } - private int bitField0_; - - // repeated uint32 cells_length = 1; - private java.util.List cellsLength_ = java.util.Collections.emptyList(); - private void ensureCellsLengthIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - cellsLength_ = new java.util.ArrayList(cellsLength_); - bitField0_ |= 0x00000001; - } - } /** - * repeated uint32 cells_length = 1; + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
*/ - public java.util.List - getCellsLengthList() { - return java.util.Collections.unmodifiableList(cellsLength_); + public Builder removeResults(int index) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + results_.remove(index); + onChanged(); + } else { + resultsBuilder_.remove(index); + } + return this; } /** - * repeated uint32 cells_length = 1; + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
*/ - public int getCellsLengthCount() { - return cellsLength_.size(); + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder getResultsBuilder( + int index) { + return getResultsFieldBuilder().getBuilder(index); } /** - * repeated uint32 cells_length = 1; + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
*/ - public int getCellsLength(int index) { - return cellsLength_.get(index); + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder getResultsOrBuilder( + int index) { + if (resultsBuilder_ == null) { + return results_.get(index); } else { + return resultsBuilder_.getMessageOrBuilder(index); + } } /** - * repeated uint32 cells_length = 1; + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
*/ - public Builder setCellsLength( - int index, int value) { - ensureCellsLengthIsMutable(); - cellsLength_.set(index, value); - onChanged(); - return this; + public java.util.List + getResultsOrBuilderList() { + if (resultsBuilder_ != null) { + return resultsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(results_); + } } /** - * repeated uint32 cells_length = 1; + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
*/ - public Builder addCellsLength(int value) { - ensureCellsLengthIsMutable(); - cellsLength_.add(value); - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder addResultsBuilder() { + return getResultsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.getDefaultInstance()); } /** - * repeated uint32 cells_length = 1; + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
*/ - public Builder addAllCellsLength( - java.lang.Iterable values) { - ensureCellsLengthIsMutable(); - super.addAll(values, cellsLength_); - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder addResultsBuilder( + int index) { + return getResultsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.getDefaultInstance()); } /** - * repeated uint32 cells_length = 1; + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * This field is mutually exclusive with cells_per_result (since the Cells will
+       * be inside the pb'd Result)
+       * 
*/ - public Builder clearCellsLength() { - cellsLength_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; + public java.util.List + getResultsBuilderList() { + return getResultsFieldBuilder().getBuilderList(); } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder> + getResultsFieldBuilder() { + if (resultsBuilder_ == null) { + resultsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder>( + results_, + ((bitField0_ & 0x00000010) == 0x00000010), + getParentForChildren(), + isClean()); + results_ = null; + } + return resultsBuilder_; + } - // @@protoc_insertion_point(builder_scope:ResultCellMeta) + // @@protoc_insertion_point(builder_scope:ScanResponse) } static { - defaultInstance = new ResultCellMeta(true); + defaultInstance = new ScanResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:ResultCellMeta) + // @@protoc_insertion_point(class_scope:ScanResponse) } public interface BulkLoadHFileRequestOrBuilder @@ -27429,11 +27553,6 @@ com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_ScanResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor - internal_static_ResultCellMeta_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ResultCellMeta_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor internal_static_BulkLoadHFileRequest_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -27551,42 +27670,41 @@ "egion\030\001 \001(\0132\020.RegionSpecifier\022\023\n\004scan\030\002 " + "\001(\0132\005.Scan\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number" + "_of_rows\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n" + - "\rnext_call_seq\030\006 \001(\004\"p\n\014ScanResponse\022)\n\020", - "result_cell_meta\030\001 \001(\0132\017.ResultCellMeta\022" + - "\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(" + - "\010\022\013\n\003ttl\030\004 \001(\r\"&\n\016ResultCellMeta\022\024\n\014cell" + - "s_length\030\001 \003(\r\"\263\001\n\024BulkLoadHFileRequest\022" + - " \n\006region\030\001 \002(\0132\020.RegionSpecifier\0225\n\013fam" + - "ily_path\030\002 \003(\0132 .BulkLoadHFileRequest.Fa" + - "milyPath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFami" + - "lyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025" + - "BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n" + - "\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014", - "service_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022" + - "\017\n\007request\030\004 \002(\014\"d\n\031CoprocessorServiceRe" + - "quest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022" + - "%\n\004call\030\002 \002(\0132\027.CoprocessorServiceCall\"]" + - "\n\032CoprocessorServiceResponse\022 \n\006region\030\001" + - " \002(\0132\020.RegionSpecifier\022\035\n\005value\030\002 \002(\0132\016." 
+ - "NameBytesPair\"B\n\013MultiAction\022 \n\010mutation" + - "\030\001 \001(\0132\016.MutationProto\022\021\n\003get\030\002 \001(\0132\004.Ge" + - "t\"I\n\014ActionResult\022\026\n\005value\030\001 \001(\0132\007.Resul" + - "t\022!\n\texception\030\002 \001(\0132\016.NameBytesPair\"^\n\014", - "MultiRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpe" + - "cifier\022\034\n\006action\030\002 \003(\0132\014.MultiAction\022\016\n\006" + - "atomic\030\003 \001(\010\".\n\rMultiResponse\022\035\n\006result\030" + - "\001 \003(\0132\r.ActionResult2\342\002\n\rClientService\022 " + - "\n\003Get\022\013.GetRequest\032\014.GetResponse\022/\n\010Mult" + - "iGet\022\020.MultiGetRequest\032\021.MultiGetRespons" + - "e\022)\n\006Mutate\022\016.MutateRequest\032\017.MutateResp" + - "onse\022#\n\004Scan\022\014.ScanRequest\032\r.ScanRespons" + - "e\022>\n\rBulkLoadHFile\022\025.BulkLoadHFileReques" + - "t\032\026.BulkLoadHFileResponse\022F\n\013ExecService", - "\022\032.CoprocessorServiceRequest\032\033.Coprocess" + - "orServiceResponse\022&\n\005Multi\022\r.MultiReques" + - "t\032\016.MultiResponseBB\n*org.apache.hadoop.h" + - "base.protobuf.generatedB\014ClientProtosH\001\210" + - "\001\001\240\001\001" + "\rnext_call_seq\030\006 \001(\004\"y\n\014ScanResponse\022\030\n\020", + "cells_per_result\030\001 \003(\r\022\022\n\nscanner_id\030\002 \001" + + "(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022\030\n" + + "\007results\030\005 \003(\0132\007.Result\"\263\001\n\024BulkLoadHFil" + + "eRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifi" + + "er\0225\n\013family_path\030\002 \003(\0132 .BulkLoadHFileR" + + "equest.FamilyPath\022\026\n\016assign_seq_num\030\003 \001(" + + "\010\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030" + + "\002 \002(\t\"\'\n\025BulkLoadHFileResponse\022\016\n\006loaded" + + "\030\001 \002(\010\"a\n\026CoprocessorServiceCall\022\013\n\003row\030" + + "\001 \002(\014\022\024\n\014service_name\030\002 \002(\t\022\023\n\013method_na", + "me\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"d\n\031Coprocessor" + + "ServiceRequest\022 \n\006region\030\001 \002(\0132\020.RegionS" + + "pecifier\022%\n\004call\030\002 \002(\0132\027.CoprocessorServ" + + "iceCall\"]\n\032CoprocessorServiceResponse\022 \n" + + "\006region\030\001 \002(\0132\020.RegionSpecifier\022\035\n\005value" + + "\030\002 \002(\0132\016.NameBytesPair\"B\n\013MultiAction\022 \n" + + "\010mutation\030\001 \001(\0132\016.MutationProto\022\021\n\003get\030\002" + + " \001(\0132\004.Get\"I\n\014ActionResult\022\026\n\005value\030\001 \001(" + + "\0132\007.Result\022!\n\texception\030\002 \001(\0132\016.NameByte" + + "sPair\"^\n\014MultiRequest\022 \n\006region\030\001 \002(\0132\020.", + "RegionSpecifier\022\034\n\006action\030\002 \003(\0132\014.MultiA" + + "ction\022\016\n\006atomic\030\003 \001(\010\".\n\rMultiResponse\022\035" + + "\n\006result\030\001 \003(\0132\r.ActionResult2\342\002\n\rClient" + + "Service\022 \n\003Get\022\013.GetRequest\032\014.GetRespons" + + "e\022/\n\010MultiGet\022\020.MultiGetRequest\032\021.MultiG" + + "etResponse\022)\n\006Mutate\022\016.MutateRequest\032\017.M" + + "utateResponse\022#\n\004Scan\022\014.ScanRequest\032\r.Sc" + + "anResponse\022>\n\rBulkLoadHFile\022\025.BulkLoadHF" + + "ileRequest\032\026.BulkLoadHFileResponse\022F\n\013Ex" + + "ecService\022\032.CoprocessorServiceRequest\032\033.", + "CoprocessorServiceResponse\022&\n\005Multi\022\r.Mu" + + 
"ltiRequest\032\016.MultiResponseBB\n*org.apache" + + ".hadoop.hbase.protobuf.generatedB\014Client" + + "ProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -27688,15 +27806,9 @@ internal_static_ScanResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ScanResponse_descriptor, - new java.lang.String[] { "ResultCellMeta", "ScannerId", "MoreResults", "Ttl", }); - internal_static_ResultCellMeta_descriptor = + new java.lang.String[] { "CellsPerResult", "ScannerId", "MoreResults", "Ttl", "Results", }); + internal_static_BulkLoadHFileRequest_descriptor = getDescriptor().getMessageTypes().get(14); - internal_static_ResultCellMeta_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ResultCellMeta_descriptor, - new java.lang.String[] { "CellsLength", }); - internal_static_BulkLoadHFileRequest_descriptor = - getDescriptor().getMessageTypes().get(15); internal_static_BulkLoadHFileRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BulkLoadHFileRequest_descriptor, @@ -27708,49 +27820,49 @@ internal_static_BulkLoadHFileRequest_FamilyPath_descriptor, new java.lang.String[] { "Family", "Path", }); internal_static_BulkLoadHFileResponse_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(15); internal_static_BulkLoadHFileResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BulkLoadHFileResponse_descriptor, new java.lang.String[] { "Loaded", }); internal_static_CoprocessorServiceCall_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(16); internal_static_CoprocessorServiceCall_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CoprocessorServiceCall_descriptor, new java.lang.String[] { "Row", "ServiceName", "MethodName", "Request", }); internal_static_CoprocessorServiceRequest_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(17); internal_static_CoprocessorServiceRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CoprocessorServiceRequest_descriptor, new java.lang.String[] { "Region", "Call", }); internal_static_CoprocessorServiceResponse_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(18); internal_static_CoprocessorServiceResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CoprocessorServiceResponse_descriptor, new java.lang.String[] { "Region", "Value", }); internal_static_MultiAction_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(19); internal_static_MultiAction_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MultiAction_descriptor, new java.lang.String[] { "Mutation", "Get", }); internal_static_ActionResult_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(20); internal_static_ActionResult_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ActionResult_descriptor, new java.lang.String[] { "Value", 
"Exception", }); internal_static_MultiRequest_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(21); internal_static_MultiRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MultiRequest_descriptor, new java.lang.String[] { "Region", "Action", "Atomic", }); internal_static_MultiResponse_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(22); internal_static_MultiResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MultiResponse_descriptor, Index: hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java =================================================================== --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java (revision 1518379) +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java (working copy) @@ -730,31 +730,31 @@ com.google.protobuf.ByteString getServiceNameBytes(); - // optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + // optional string cell_block_codec_class = 3; /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
      * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-     * if cannot deal.
+     * if it cannot deal.  Null means no codec'ing going on, so we are pb all the time (SLOW!!!)
      * 
*/ boolean hasCellBlockCodecClass(); /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
      * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-     * if cannot deal.
+     * if it cannot deal.  Null means no codec'ing going on, so we are pb all the time (SLOW!!!)
      * 
*/ java.lang.String getCellBlockCodecClass(); /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
      * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-     * if cannot deal.
+     * if it cannot deal.  Null means no codec'ing going on, so we are pb all the time (SLOW!!!)
      * 
*/ com.google.protobuf.ByteString @@ -766,7 +766,7 @@ * *
      * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-     * Class must implement hadoop's CompressionCodec Interface
+     * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
      * 
*/ boolean hasCellBlockCompressorClass(); @@ -775,7 +775,7 @@ * *
      * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-     * Class must implement hadoop's CompressionCodec Interface
+     * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
      * 
*/ java.lang.String getCellBlockCompressorClass(); @@ -784,7 +784,7 @@ * *
      * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-     * Class must implement hadoop's CompressionCodec Interface
+     * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
      * 
*/ com.google.protobuf.ByteString @@ -978,26 +978,26 @@ } } - // optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + // optional string cell_block_codec_class = 3; public static final int CELL_BLOCK_CODEC_CLASS_FIELD_NUMBER = 3; private java.lang.Object cellBlockCodecClass_; /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
      * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-     * if cannot deal.
+     * if it cannot deal.  Null means no codec'ing going on, so we are pb all the time (SLOW!!!)
      * 
*/ public boolean hasCellBlockCodecClass() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
      * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-     * if cannot deal.
+     * if it cannot deal.  Null means no codec'ing going on, so we are pb all the time (SLOW!!!)
      * 
*/ public java.lang.String getCellBlockCodecClass() { @@ -1015,11 +1015,11 @@ } } /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
      * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-     * if cannot deal.
+     * if it cannot deal.  Null means no codec'ing going on, so we are pb all the time (SLOW!!!)
      * 
*/ public com.google.protobuf.ByteString @@ -1044,7 +1044,7 @@ * *
      * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-     * Class must implement hadoop's CompressionCodec Interface
+     * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
      * 
*/ public boolean hasCellBlockCompressorClass() { @@ -1055,7 +1055,7 @@ * *
      * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-     * Class must implement hadoop's CompressionCodec Interface
+     * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
      * 
*/ public java.lang.String getCellBlockCompressorClass() { @@ -1077,7 +1077,7 @@ * *
      * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-     * Class must implement hadoop's CompressionCodec Interface
+     * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
      * 
*/ public com.google.protobuf.ByteString @@ -1097,7 +1097,7 @@ private void initFields() { userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); serviceName_ = ""; - cellBlockCodecClass_ = "org.apache.hadoop.hbase.codec.KeyValueCodec"; + cellBlockCodecClass_ = ""; cellBlockCompressorClass_ = ""; } private byte memoizedIsInitialized = -1; @@ -1349,7 +1349,7 @@ bitField0_ = (bitField0_ & ~0x00000001); serviceName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); - cellBlockCodecClass_ = "org.apache.hadoop.hbase.codec.KeyValueCodec"; + cellBlockCodecClass_ = ""; bitField0_ = (bitField0_ & ~0x00000004); cellBlockCompressorClass_ = ""; bitField0_ = (bitField0_ & ~0x00000008); @@ -1659,25 +1659,25 @@ return this; } - // optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; - private java.lang.Object cellBlockCodecClass_ = "org.apache.hadoop.hbase.codec.KeyValueCodec"; + // optional string cell_block_codec_class = 3; + private java.lang.Object cellBlockCodecClass_ = ""; /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
        * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-       * if cannot deal.
+       * if it cannot deal.  Null means no codec'ing going on, so we are pb all the time (SLOW!!!)
        * 
*/ public boolean hasCellBlockCodecClass() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
        * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-       * if cannot deal.
+       * if it cannot deal.  Null means no codec'ing going on, so we are pb all the time (SLOW!!!)
        * 
*/ public java.lang.String getCellBlockCodecClass() { @@ -1692,11 +1692,11 @@ } } /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
        * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-       * if cannot deal.
+       * if it cannot deal.  Null means no codec'ing going on, so we are pb all the time (SLOW!!!)
        * 
*/ public com.google.protobuf.ByteString @@ -1713,11 +1713,11 @@ } } /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
        * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-       * if cannot deal.
+       * if it cannot deal.  Null means no codec'ing going on, so we are pb all the time (SLOW!!!)
        * 
*/ public Builder setCellBlockCodecClass( @@ -1731,11 +1731,11 @@ return this; } /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
        * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-       * if cannot deal.
+       * if it cannot deal.  Null means no codec'ing going on, so we are pb all the time (SLOW!!!)
        * 
*/ public Builder clearCellBlockCodecClass() { @@ -1745,11 +1745,11 @@ return this; } /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
        * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-       * if cannot deal.
+       * if it cannot deal.  Null means no codec'ing going on, so we are pb all the time (SLOW!!!)
        * 
*/ public Builder setCellBlockCodecClassBytes( @@ -1770,7 +1770,7 @@ * *
        * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-       * Class must implement hadoop's CompressionCodec Interface
+       * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
        * 
*/ public boolean hasCellBlockCompressorClass() { @@ -1781,7 +1781,7 @@ * *
        * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-       * Class must implement hadoop's CompressionCodec Interface
+       * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
        * 
*/ public java.lang.String getCellBlockCompressorClass() { @@ -1800,7 +1800,7 @@ * *
        * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-       * Class must implement hadoop's CompressionCodec Interface
+       * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
        * 
*/ public com.google.protobuf.ByteString @@ -1821,7 +1821,7 @@ * *
        * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-       * Class must implement hadoop's CompressionCodec Interface
+       * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
        * 
*/ public Builder setCellBlockCompressorClass( @@ -1839,7 +1839,7 @@ * *
        * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-       * Class must implement hadoop's CompressionCodec Interface
+       * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
        * 
*/ public Builder clearCellBlockCompressorClass() { @@ -1853,7 +1853,7 @@ * *
        * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-       * Class must implement hadoop's CompressionCodec Interface
+       * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
        * 
*/ public Builder setCellBlockCompressorClassBytes( @@ -6002,25 +6002,24 @@ java.lang.String[] descriptorData = { "\n\tRPC.proto\032\rTracing.proto\032\013hbase.proto\"" + "<\n\017UserInformation\022\026\n\016effective_user\030\001 \002" + - "(\t\022\021\n\treal_user\030\002 \001(\t\"\277\001\n\020ConnectionHead" + + "(\t\022\021\n\treal_user\030\002 \001(\t\"\222\001\n\020ConnectionHead" + "er\022#\n\tuser_info\030\001 \001(\0132\020.UserInformation\022" + - "\024\n\014service_name\030\002 \001(\t\022K\n\026cell_block_code" + - "c_class\030\003 \001(\t:+org.apache.hadoop.hbase.c" + - "odec.KeyValueCodec\022#\n\033cell_block_compres" + - "sor_class\030\004 \001(\t\"\037\n\rCellBlockMeta\022\016\n\006leng" + - "th\030\001 \001(\r\"|\n\021ExceptionResponse\022\034\n\024excepti" + - "on_class_name\030\001 \001(\t\022\023\n\013stack_trace\030\002 \001(\t", - "\022\020\n\010hostname\030\003 \001(\t\022\014\n\004port\030\004 \001(\005\022\024\n\014do_n" + - "ot_retry\030\005 \001(\010\"\254\001\n\rRequestHeader\022\017\n\007call" + - "_id\030\001 \001(\r\022\035\n\ntrace_info\030\002 \001(\0132\t.RPCTInfo" + - "\022\023\n\013method_name\030\003 \001(\t\022\025\n\rrequest_param\030\004" + - " \001(\010\022\'\n\017cell_block_meta\030\005 \001(\0132\016.CellBloc" + - "kMeta\022\026\n\016effective_user\030\006 \001(\t\"q\n\016Respons" + - "eHeader\022\017\n\007call_id\030\001 \001(\r\022%\n\texception\030\002 " + - "\001(\0132\022.ExceptionResponse\022\'\n\017cell_block_me" + - "ta\030\003 \001(\0132\016.CellBlockMetaB<\n*org.apache.h" + - "adoop.hbase.protobuf.generatedB\tRPCProto", - "sH\001\240\001\001" + "\024\n\014service_name\030\002 \001(\t\022\036\n\026cell_block_code" + + "c_class\030\003 \001(\t\022#\n\033cell_block_compressor_c" + + "lass\030\004 \001(\t\"\037\n\rCellBlockMeta\022\016\n\006length\030\001 " + + "\001(\r\"|\n\021ExceptionResponse\022\034\n\024exception_cl" + + "ass_name\030\001 \001(\t\022\023\n\013stack_trace\030\002 \001(\t\022\020\n\010h" + + "ostname\030\003 \001(\t\022\014\n\004port\030\004 \001(\005\022\024\n\014do_not_re", + "try\030\005 \001(\010\"\254\001\n\rRequestHeader\022\017\n\007call_id\030\001" + + " \001(\r\022\035\n\ntrace_info\030\002 \001(\0132\t.RPCTInfo\022\023\n\013m" + + "ethod_name\030\003 \001(\t\022\025\n\rrequest_param\030\004 \001(\010\022" + + "\'\n\017cell_block_meta\030\005 \001(\0132\016.CellBlockMeta" + + "\022\026\n\016effective_user\030\006 \001(\t\"q\n\016ResponseHead" + + "er\022\017\n\007call_id\030\001 \001(\r\022%\n\texception\030\002 \001(\0132\022" + + ".ExceptionResponse\022\'\n\017cell_block_meta\030\003 " + + "\001(\0132\016.CellBlockMetaB<\n*org.apache.hadoop" + + ".hbase.protobuf.generatedB\tRPCProtosH\001\240\001" + + "\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { Index: hbase-protocol/src/main/protobuf/Client.proto =================================================================== --- hbase-protocol/src/main/protobuf/Client.proto (revision 1518379) +++ hbase-protocol/src/main/protobuf/Client.proto (working copy) @@ -263,16 +263,23 @@ * be false. If it is not specified, it means there are more. */ message ScanResponse { - optional ResultCellMeta result_cell_meta = 1; + // This field is filled in if we are doing cellblocks. A cellblock is made up + // of all Cells serialized out as one cellblock BUT responses from a server + // have their Cells grouped by Result. 
So we can reconstitute the
+  // Results on the client-side, this field is a list of counts of Cells
+  // in each Result that makes up the response.  For example, if this field
+  // has 3, 3, 3 in it, then we know that on the client, we are to make
+  // three Results of three Cells each.
+  repeated uint32 cells_per_result = 1;
   optional uint64 scanner_id = 2;
   optional bool more_results = 3;
   optional uint32 ttl = 4;
+  // If cells are not carried in an accompanying cellblock, then they are pb'd here.
+  // This field is mutually exclusive with cells_per_result (since the Cells will
+  // be inside the pb'd Result).
+  repeated Result results = 5;
 }
 
-message ResultCellMeta {
-  repeated uint32 cells_length = 1;
-}
-
 /**
  * Atomically bulk load multiple HFiles (say from different column families)
  * into an open region.
Index: hbase-protocol/src/main/protobuf/RPC.proto
===================================================================
--- hbase-protocol/src/main/protobuf/RPC.proto	(revision 1518379)
+++ hbase-protocol/src/main/protobuf/RPC.proto	(working copy)
@@ -81,10 +81,10 @@
   optional UserInformation user_info = 1;
   optional string service_name = 2;
   // Cell block codec we will use sending over optional cell blocks.  Server throws exception
-  // if cannot deal.
-  optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"];
+  // if it cannot deal.  Null means no codec'ing going on, so we are pb all the time (SLOW!!!)
+  optional string cell_block_codec_class = 3;
   // Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-  // Class must implement hadoop's CompressionCodec Interface
+  // Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
   optional string cell_block_compressor_class = 4;
 }
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java	(revision 1518379)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java	(working copy)
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
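[Illustrative aside, not part of the patch.] The comments above pin down the client-side decode: if cells_per_result is non-empty, the Cells travel in the accompanying cellblock and the counts say how to regroup them into Results; if results is non-empty, each Result arrives fully pb'd. A minimal decoding sketch under those assumptions, using the existing client API (CellScanner, KeyValueUtil.ensureKeyValue, the Result(List<KeyValue>) constructor, and ProtobufUtil.toResult):

  import java.io.IOException;
  import java.util.ArrayList;
  import java.util.List;

  import org.apache.hadoop.hbase.CellScanner;
  import org.apache.hadoop.hbase.KeyValue;
  import org.apache.hadoop.hbase.KeyValueUtil;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;

  public class ScanResponseDecodeSketch {
    static Result[] toResults(final ScanResponse response, final CellScanner cells)
    throws IOException {
      if (response.getCellsPerResultCount() > 0) {
        // Cellblock shape: regroup the streamed Cells using the counts.
        Result[] results = new Result[response.getCellsPerResultCount()];
        for (int i = 0; i < results.length; i++) {
          int cellCount = response.getCellsPerResult(i);
          List<KeyValue> kvs = new ArrayList<KeyValue>(cellCount);
          for (int j = 0; j < cellCount; j++) {
            if (!cells.advance()) throw new IOException("Cellblock ran out of Cells");
            kvs.add(KeyValueUtil.ensureKeyValue(cells.current()));
          }
          results[i] = new Result(kvs);
        }
        return results;
      }
      // pb-only shape: each Result came over complete with its Cells.
      List<ClientProtos.Result> pbResults = response.getResultsList();
      Result[] results = new Result[pbResults.size()];
      for (int i = 0; i < results.length; i++) {
        results[i] = ProtobufUtil.toResult(pbResults.get(i));
      }
      return results;
    }
  }

With cells_per_result = [3, 3, 3] and nine Cells in the cellblock, the first branch yields three Results of three Cells each, matching the example in the .proto comment.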
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java	(revision 1518379)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java	(working copy)
@@ -25,4 +25,14 @@
    * @throws CallerDisconnectedException
    */
  void throwExceptionIfCallerDisconnected(String regionName) throws CallerDisconnectedException;
-}
+
+  /**
+   * If the client connected and specified a codec to use, then we will use this codec making
+   * cellblocks to return.  If the client did not specify a codec, we assume it does not support
+   * cellblocks and will return all content protobuf'd (though it makes our serving slower).
+   * We need to ask this question per call because a server could be serving clients that
+   * support cellblocks while also fielding requests from clients that do not.
+   * @return True if the client supports cellblocks; if not, all content is returned in pb
+   */
+  boolean isClientCellBlockSupport();
+}
\ No newline at end of file
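For illustration (not in the patch): a service-side method asks this new question per call as below; the null guard matters because code paths such as unit tests can run with no ongoing RPC behind them.

  // Sketch: per-call probe for cellblock support inside a handler method.
  RpcCallContext context = RpcServer.getCurrentCall();
  boolean useCellBlocks = (context != null) && context.isClientCellBlockSupport();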
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java	(revision 1518379)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java	(working copy)
@@ -21,6 +21,7 @@
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION;
 
+import com.google.common.collect.Lists;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataOutputStream;
@@ -61,7 +62,6 @@
 import javax.security.sasl.SaslException;
 import javax.security.sasl.SaslServer;
 
-import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -203,8 +203,8 @@
 
   protected final Configuration conf;
 
+  private final int maxQueueSize;
   private int maxQueueLength;
-  private int maxQueueSize;
   protected int socketSendBufferSize;
   protected final boolean tcpNoDelay;   // if T then disable Nagle's Algorithm
   protected final boolean tcpKeepAlive; // if T then use keepalives
@@ -450,6 +450,11 @@
     }
 
     @Override
+    public boolean isClientCellBlockSupport() {
+      return this.connection != null && this.connection.codec != null;
+    }
+
+    @Override
     public void throwExceptionIfCallerDisconnected(String regionName)
         throws CallerDisconnectedException {
       if (!connection.channel.isOpen()) {
@@ -1568,7 +1573,9 @@
     private void setupCellBlockCodecs(final ConnectionHeader header)
     throws FatalConnectionException {
       // TODO: Plug in other supported decoders.
+      if (!header.hasCellBlockCodecClass()) return;
       String className = header.getCellBlockCodecClass();
+      if (className == null || className.length() == 0) return;
       try {
         this.codec = (Codec)Class.forName(className).newInstance();
       } catch (Exception e) {
@@ -2425,9 +2432,10 @@
   }
 
   /**
-   * Needed for delayed calls. We need to be able to store the current call
-   * so that we can complete it later.
-   * @return Call the server is currently handling.
+   * Needed for features such as delayed calls. We need to be able to store the current call
+   * so that we can complete it later or ask questions about what the current ongoing call
+   * supports.
+   * @return An RpcCallContext backed by the currently ongoing call (gotten from a thread local)
    */
   public static RpcCallContext getCurrentCall() {
     return CurCall.get();
   }
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java	(revision 1518379)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java	(working copy)
@@ -25,12 +25,12 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
 
+import com.google.common.base.Function;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Function;
 import com.google.protobuf.BlockingService;
 import com.google.protobuf.Descriptors.MethodDescriptor;
 import com.google.protobuf.Message;
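Because cell_block_codec_class no longer defaults to KeyValueCodec, a client that wants cellblocks must name a codec explicitly in its ConnectionHeader; an unset field now routes the connection down the all-protobuf path that setupCellBlockCodecs() above short-circuits into. A hypothetical sketch of the two header shapes (assumes the generated RPCProtos.ConnectionHeader builder; the service name value is illustrative):

  import org.apache.hadoop.hbase.codec.KeyValueCodec;
  import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader;

  public class ConnectionHeaderSketch {
    public static void main(String[] args) {
      // Cellblock-capable client: names its codec explicitly.
      ConnectionHeader withCodec = ConnectionHeader.newBuilder()
          .setServiceName("ClientService")
          .setCellBlockCodecClass(KeyValueCodec.class.getName())
          .build();
      // Codec-less client: leaves the field unset, so setupCellBlockCodecs()
      // returns early and all content travels protobuf'd (the SLOW path).
      ConnectionHeader noCodec = ConnectionHeader.newBuilder()
          .setServiceName("ClientService")
          .build();
      System.out.println(withCodec.hasCellBlockCodecClass()); // true
      System.out.println(noCodec.hasCellBlockCodecClass());   // false
    }
  }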
region.prepareScanner(scan); if (region.getCoprocessorHost() != null) { scanner = region.getCoprocessorHost().preScannerOpen(scan); @@ -3118,16 +3117,7 @@ moreResults = false; results = null; } else { - ResultCellMeta.Builder rcmBuilder = ResultCellMeta.newBuilder(); - List<CellScannable> cellScannables = new ArrayList<CellScannable>(results.size()); - for (Result res : results) { - cellScannables.add(res); - rcmBuilder.addCellsLength(res.size()); - } - builder.setResultCellMeta(rcmBuilder.build()); - // TODO is this okey to assume the type and cast - ((PayloadCarryingRpcController) controller).setCellScanner(CellUtil - .createCellScanner(cellScannables)); + formatResults(builder, results, controller); } } finally { // We're done. On way out re-add the above removed lease. @@ -3175,6 +3165,26 @@ } } + private void formatResults(final ScanResponse.Builder builder, final List<Result> results, + final RpcController controller) { + if (results == null || results.isEmpty()) return; + RpcCallContext context = RpcServer.getCurrentCall(); + if (context != null && context.isClientCellBlockSupport()) { + List<CellScannable> cellScannables = new ArrayList<CellScannable>(results.size()); + for (Result res : results) { + cellScannables.add(res); + builder.addCellsPerResult(res.size()); + } + ((PayloadCarryingRpcController)controller). + setCellScanner(CellUtil.createCellScanner(cellScannables)); + } else { + for (Result res: results) { + ClientProtos.Result pbr = ProtobufUtil.toResult(res); + builder.addResults(pbr); + } + } + } + /** * Atomically bulk load several HFiles into an open region * @return true if successful, false is failed but recoverably (no action) Index: hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java =================================================================== --- hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java (revision 1518379) +++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestCheckTestClasses.java (working copy) @@ -18,23 +18,12 @@ package org.apache.hadoop.hbase; -import static junit.framework.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import java.io.File; -import java.io.FileFilter; -import java.io.IOException; -import java.lang.reflect.Method; -import java.lang.reflect.Modifier; -import java.net.URL; -import java.util.ArrayList; -import java.util.Enumeration; import java.util.List; -import java.util.regex.Pattern; import org.junit.Test; import org.junit.experimental.categories.Category; -import org.junit.runners.Suite; /** Index: hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java =================================================================== --- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java (revision 1518379) +++ hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java (working copy) @@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.util.Bytes; @@ -167,11 +166,9 @@ final List<CellScannable> cellScannables = new ArrayList<CellScannable>(1); cellScannables.add(new Result(kvs)); final ScanResponse.Builder builder = ScanResponse.newBuilder(); -
ResultCellMeta.Builder metaBuilder = ResultCellMeta.newBuilder(); for (CellScannable result : cellScannables) { - metaBuilder.addCellsLength(((Result)result).size()); + builder.addCellsPerResult(((Result)result).size()); } - builder.setResultCellMeta(metaBuilder.build()); Mockito.when(implementation.scan((RpcController) Mockito.any(), (ScanRequest) Mockito.any())) .thenThrow(new ServiceException("Server not running (1 of 3)")) .thenThrow(new ServiceException("Server not running (2 of 3)")) Index: hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java =================================================================== --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (revision 1518379) +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (working copy) @@ -40,9 +40,6 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.lang.ArrayUtils; @@ -51,7 +48,6 @@ import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -63,6 +59,7 @@ import org.apache.hadoop.hbase.LargeTests; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; @@ -94,7 +91,6 @@ import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.log4j.Level; import org.junit.After; Index: hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java =================================================================== --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java (revision 0) +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java (working copy) @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertTrue; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Do some ops and prove that client and server can work w/o codecs; that we can pb all the time. + * Good for third-party clients or simple scripts that want to talk directly to hbase. + */ +@Category(MediumTests.class) +public class TestFromClientSideNoCodec { + protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + /** + * @throws java.lang.Exception + */ + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Turn off codec use + TEST_UTIL.getConfiguration().set("hbase.client.default.rpc.codec", ""); + TEST_UTIL.startMiniCluster(1); + } + + /** + * @throws java.lang.Exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testBasics() throws IOException { + final byte [] t = Bytes.toBytes("testBasics"); + final byte [][] fs = new byte[][] {Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), + Bytes.toBytes("cf3") }; + HTable ht = TEST_UTIL.createTable(t, fs); + // Check put and get. + final byte [] row = Bytes.toBytes("row"); + Put p = new Put(row); + for (byte [] f: fs) p.add(f, f, f); + ht.put(p); + Result r = ht.get(new Get(row)); + int i = 0; + for (CellScanner cellScanner = r.cellScanner(); cellScanner.advance();) { + Cell cell = cellScanner.current(); + byte [] f = fs[i++]; + assertTrue(Bytes.toString(f), + Bytes.equals(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), + f, 0, f.length)); + } + // Check getRowOrBefore + byte [] f = fs[0]; + r = ht.getRowOrBefore(row, f); + assertTrue(r.toString(), r.containsColumn(f, f)); + // Check scan.
+ ResultScanner scanner = ht.getScanner(new Scan()); + int count = 0; + while ((r = scanner.next()) != null) { + assertTrue(r.list().size() == 3); + count++; + } + assertTrue(count == 1); + } + + @Test + public void testNoCodec() { + Configuration c = new Configuration(); + c.set("hbase.client.default.rpc.codec", ""); + String codec = RpcClient.getDefaultCodec(c); + assertTrue(codec == null || codec.length() == 0); + } +} \ No newline at end of file Index: hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java =================================================================== --- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java (revision 1518379) +++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java (working copy) @@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.Collection; @@ -41,7 +40,6 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MediumTests; -import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -64,7 +62,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category(SmallTests.class) +@Category(MediumTests.class) public class TestRegionObserverScannerOpenHook { private static HBaseTestingUtility UTIL = new HBaseTestingUtility(); static final Path DIR = UTIL.getDataTestDir(); @@ -212,7 +210,6 @@ * region */ @Test - @Category(MediumTests.class) public void testRegionObserverCompactionTimeStacking() throws Exception { // setup a mini cluster so we can do a real compaction on a region Configuration conf = UTIL.getConfiguration(); Index: hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java =================================================================== --- hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java (revision 1518379) +++ hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java (working copy) @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoRequestProto; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoResponseProto; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EmptyRequestProto; @@ -112,13 +113,16 @@ // an echo, just put them back on the controller creating a new block. Tests our block // building. CellScanner cellScanner = pcrc.cellScanner(); - List<Cell> list = new ArrayList<Cell>(); - try { - while(cellScanner.advance()) { - list.add(cellScanner.current()); + List<Cell> list = null; + if (cellScanner != null) { + list = new ArrayList<Cell>(); + try { + while(cellScanner.advance()) { + list.add(cellScanner.current()); + } + } catch (IOException e) { + throw new ServiceException(e); } - } catch (IOException e) { - throw new ServiceException(e); } cellScanner = CellUtil.createCellScanner(list); ((PayloadCarryingRpcController)controller).setCellScanner(cellScanner); @@ -148,6 +152,38 @@ } /** + * Ensure we do not HAVE TO HAVE a codec.
+ * @throws InterruptedException + * @throws IOException + */ + @Test + public void testNoCodec() throws InterruptedException, IOException { + Configuration conf = HBaseConfiguration.create(); + RpcClient client = new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT) { + @Override + Codec getCodec() { + return null; + } + }; + TestRpcServer rpcServer = new TestRpcServer(); + try { + rpcServer.start(); + InetSocketAddress address = rpcServer.getListenerAddress(); + MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); + final String message = "hello"; + EchoRequestProto param = EchoRequestProto.newBuilder().setMessage(message).build(); + Pair<Message, CellScanner> r = client.call(md, param, null, + md.getOutputType().toProto(), User.getCurrent(), address, 0); + assertTrue(r.getSecond() == null); + // Silly assertion that the message is in the returned pb. + assertTrue(r.getFirst().toString().contains(message)); + } finally { + client.stop(); + rpcServer.stop(); + } + } + + /** * It is hard to verify the compression is actually happening under the wraps. Hope that if * unsupported, we'll get an exception out of some time (meantime, have to trace it manually * to confirm that compression is happening down in the client and server). @@ -159,11 +195,14 @@ @Test public void testCompressCellBlock() throws IOException, InterruptedException, SecurityException, NoSuchMethodException { - // Currently, you set - Configuration conf = HBaseConfiguration.create(); + Configuration conf = new Configuration(HBaseConfiguration.create()); conf.set("hbase.client.rpc.compressor", GzipCodec.class.getCanonicalName()); + doSimpleTest(conf, new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT)); + } + + private void doSimpleTest(final Configuration conf, final RpcClient client) + throws InterruptedException, IOException { TestRpcServer rpcServer = new TestRpcServer(); - RpcClient client = new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT); List<Cell> cells = new ArrayList<Cell>(); int count = 3; for (int i = 0; i < count; i++) cells.add(CELL); Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java =================================================================== --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java (revision 1518379) +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java (working copy) @@ -84,7 +84,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.regionserver.CompactionRequestor; @@ -382,9 +381,7 @@ long scannerId = request.getScannerId(); Result result = next(scannerId); if (result != null) { - ResultCellMeta.Builder metaBuilder = ResultCellMeta.newBuilder(); - metaBuilder.addCellsLength(result.size()); - builder.setResultCellMeta(metaBuilder.build()); + builder.addCellsPerResult(result.size()); List<CellScannable> results = new ArrayList<CellScannable>(1); results.add(result); ((PayloadCarryingRpcController) controller).setCellScanner(CellUtil Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java ===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java (revision 1518379) +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java (working copy) @@ -64,7 +64,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table; @@ -606,9 +605,7 @@ final ScanResponse.Builder builder = ScanResponse.newBuilder(); builder.setMoreResults(true); - ResultCellMeta.Builder metaBuilder = ResultCellMeta.newBuilder(); - metaBuilder.addCellsLength(r.size()); - builder.setResultCellMeta(metaBuilder.build()); + builder.addCellsPerResult(r.size()); final List<CellScannable> cellScannables = new ArrayList<CellScannable>(1); cellScannables.add(r); Mockito.when(implementation.scan( @@ -1077,9 +1074,7 @@ Result r = MetaMockingUtil.getMetaTableRowResult(REGIONINFO, SERVERNAME_A); final ScanResponse.Builder builder = ScanResponse.newBuilder(); builder.setMoreResults(true); - ResultCellMeta.Builder metaBuilder = ResultCellMeta.newBuilder(); - metaBuilder.addCellsLength(r.size()); - builder.setResultCellMeta(metaBuilder.build()); + builder.addCellsPerResult(r.size()); final List<CellScannable> rows = new ArrayList<CellScannable>(1); rows.add(r); Answer<ScanResponse> ans = new Answer<ScanResponse>() { Index: src/main/docbkx/rpc.xml =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/xml
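
A minimal standalone sketch of a client exercising the pb-only path this patch adds, modeled on TestFromClientSideNoCodec above. It is not part of the patch; the class name, table name, and row key are illustrative. The only load-bearing pieces are from the patch itself: the "hbase.client.default.rpc.codec" key and the server-side fallback.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class PbOnlyClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // An empty default codec means the client sends no cellBlockCodecClass in
    // its ConnectionHeader, so the server treats this connection as pb-only.
    conf.set("hbase.client.default.rpc.codec", "");
    HTable table = new HTable(conf, "exampleTable"); // illustrative table name
    try {
      Result r = table.get(new Get(Bytes.toBytes("exampleRow")));
      System.out.println("cells returned via pure pb: " + r.size());
    } finally {
      table.close();
    }
  }
}

With the codec cleared, setupCellBlockCodecs() returns early, isClientCellBlockSupport() reports false for the connection, and formatResults() converts each Result with ProtobufUtil.toResult and ships it inside the ScanResponse via addResults instead of as a trailing cellblock.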