diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
index 67b3aff..2c07dfc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
@@ -67,16 +67,21 @@ class MultiServerCallable extends RegionServerCallable {
       // Row Mutations are a set of Puts and/or Deletes all to be applied atomically
       // on the one row. We do these a row at a time.
       if (row instanceof RowMutations) {
+        RowMutations rms = (RowMutations)row;
+        List cells = null;
+        MultiRequest multiRequest;
         try {
-          RowMutations rms = (RowMutations)row;
-          // Stick all Cells for all RowMutations in here into 'cells'. Populated when we call
-          // buildNoDataMultiRequest in the below.
-          List cells = new ArrayList(rms.getMutations().size());
-          // Build a multi request absent its Cell payload (this is the 'nodata' in the below).
-          MultiRequest multiRequest =
-            RequestConverter.buildNoDataMultiRequest(regionName, rms, cells);
-          // Carry the cells over the proxy/pb Service interface using the payload carrying
-          // rpc controller.
+          if (isCellBlock()) {
+            // Stick all Cells for all RowMutations in here into 'cells'. Populated when we call
+            // buildNoDataMultiRequest in the below.
+            cells = new ArrayList(rms.getMutations().size());
+            // Build a multi request absent its Cell payload (this is the 'nodata' in the below).
+            multiRequest = RequestConverter.buildNoDataMultiRequest(regionName, rms, cells);
+          } else {
+            multiRequest = RequestConverter.buildMultiRequest(regionName, rms);
+          }
+          // Carry the cells, if any, over the proxy/pb Service interface using the payload
+          // carrying rpc controller.
           getStub().multi(new PayloadCarryingRpcController(cells), multiRequest);
           // This multi call does not return results.
           response.add(regionName, action.getOriginalIndex(), Result.EMPTY_RESULT);
@@ -91,14 +96,17 @@ class MultiServerCallable extends RegionServerCallable {
     if (actions.size() > rowMutations) {
       Exception ex = null;
       List results = null;
-      // Stick all Cells for the multiRequest in here into 'cells'. Gets filled in when we
-      // call buildNoDataMultiRequest
-      List cells = new ArrayList(actions.size() - rowMutations);
+      List cells = null;
+      MultiRequest multiRequest;
       try {
-        // The call to buildNoDataMultiRequest will skip RowMutations. They have
-        // already been handled above.
-        MultiRequest multiRequest =
-          RequestConverter.buildNoDataMultiRequest(regionName, actions, cells);
+        if (isCellBlock()) {
+          // Send data in cellblocks. The call to buildNoDataMultiRequest will skip RowMutations.
+          // They have already been handled above.
+          cells = new ArrayList(actions.size() - rowMutations);
+          multiRequest = RequestConverter.buildNoDataMultiRequest(regionName, actions, cells);
+        } else {
+          multiRequest = RequestConverter.buildMultiRequest(regionName, actions);
+        }
         // Controller optionally carries cell data over the proxy/service boundary and also
         // optionally ferries cell response data back out again.
         PayloadCarryingRpcController controller = new PayloadCarryingRpcController(cells);
@@ -116,9 +124,20 @@ class MultiServerCallable extends RegionServerCallable {
     return response;
   }
+
+  /**
+   * @return True if we should send data in cellblocks
+   */
+  boolean isCellBlock() {
+    // This is not exact -- the configuration could have changed on us after connection was set up
+    // but it will do for now.
+    String codec = getConnection().getConfiguration().get("hbase.client.rpc.codec", "");
+    return codec != null && codec.length() > 0;
+  }
+
   @Override
   public void prepare(boolean reload) throws IOException {
     // Use the location we were given in the constructor rather than go look it up.
     setStub(getConnection().getClient(getLocation().getServerName()));
   }
-}
+}
\ No newline at end of file
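For context, a minimal client-side sketch (hypothetical, not part of the patch) of the RowMutations path exercised above; whichever branch isCellBlock() takes, the server still applies the mutations atomically on the one row:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.util.Bytes;

public class RowMutationsExample {
  static void atomicPutAndDelete(final HTable table) throws IOException {
    byte[] row = Bytes.toBytes("row1");
    RowMutations rm = new RowMutations(row);
    Put put = new Put(row);
    put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    rm.add(put);
    Delete delete = new Delete(row);
    delete.deleteColumns(Bytes.toBytes("f"), Bytes.toBytes("old"));
    rm.add(delete);
    // One atomic apply on the one row; whether the Cells travel in a trailing
    // cellblock or inline as protobuf is decided per-connection by isCellBlock(),
    // invisible to the caller.
    table.mutateRow(rm);
  }
}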
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java
index a08f375..2870423 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java
@@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.codec.Codec;
 import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
 import org.apache.hadoop.hbase.io.HeapSize;
@@ -86,6 +87,7 @@ class IPCUtil {
       final CellScanner cellScanner)
   throws IOException {
     if (cellScanner == null) return null;
+    if (codec == null) throw new HBaseIOException("A cellScanner but no codec");
     int bufferSize = this.cellBlockBuildingInitialBufferSize;
     if (cellScanner instanceof HeapSize) {
       long longSize = ((HeapSize)cellScanner).heapSize();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/PayloadCarryingRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/PayloadCarryingRpcController.java
index a76e910..d3bc831 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/PayloadCarryingRpcController.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/PayloadCarryingRpcController.java
@@ -55,7 +55,7 @@ public class PayloadCarryingRpcController implements RpcController, CellScannabl
   }

   public PayloadCarryingRpcController(final List cellIterables) {
-    this.cellScanner = CellUtil.createCellScanner(cellIterables);
+    this.cellScanner = cellIterables == null?
+      null: CellUtil.createCellScanner(cellIterables);
   }

   /**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
index f5ecf9e..2e206e6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
@@ -89,6 +89,7 @@ import org.apache.hadoop.security.token.TokenSelector;
 import org.cloudera.htrace.Span;
 import org.cloudera.htrace.Trace;

+import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingRpcChannel;
 import com.google.protobuf.Descriptors.MethodDescriptor;
 import com.google.protobuf.Message;
@@ -426,7 +427,9 @@ public class RpcClient {
       if ((userInfoPB = getUserInfo(ticket)) != null) {
         builder.setUserInfo(userInfoPB);
       }
-      builder.setCellBlockCodecClass(this.codec.getClass().getCanonicalName());
+      if (this.codec != null) {
+        builder.setCellBlockCodecClass(this.codec.getClass().getCanonicalName());
+      }
       if (this.compressor != null) {
         builder.setCellBlockCompressorClass(this.compressor.getClass().getCanonicalName());
       }
@@ -1249,7 +1252,7 @@ public class RpcClient {
     this.pingInterval = getPingInterval(conf);
     this.ipcUtil = new IPCUtil(conf);
     this.conf = conf;
-    this.codec = getCodec(conf);
+    this.codec = getCodec();
     this.compressor = getCompressor(conf);
     this.socketFactory = factory;
     this.clusterId = clusterId != null ? clusterId : HConstants.CLUSTER_ID_DEFAULT;
@@ -1291,18 +1294,28 @@ public class RpcClient {

   /**
    * Encapsulate the ugly casting and RuntimeException conversion in private method.
-   * @param conf
    * @return Codec to use on this client.
    */
-  private static Codec getCodec(final Configuration conf) {
-    String className = conf.get("hbase.client.rpc.codec", KeyValueCodec.class.getCanonicalName());
+  Codec getCodec() {
+    // For NO CODEC, "hbase.client.rpc.codec" must be the empty string AND so must
+    // "hbase.client.default.rpc.codec" -- because the default is to do cell block encoding.
+    String className = conf.get("hbase.client.rpc.codec", getDefaultCodec(this.conf));
+    if (className == null || className.length() == 0) return null;
     try {
-      return (Codec)Class.forName(className).newInstance();
+      return (Codec)Class.forName(className).newInstance();
     } catch (Exception e) {
       throw new RuntimeException("Failed getting codec " + className, e);
     }
   }

+  @VisibleForTesting
+  public static String getDefaultCodec(final Configuration c) {
+    // If "hbase.client.default.rpc.codec" is the empty string -- you can't set it to null because
+    // Configuration will complain -- then there is no default codec (and we'll pb everything).
+    // Else the default is KeyValueCodec.
+    return c.get("hbase.client.default.rpc.codec", KeyValueCodec.class.getCanonicalName());
+  }
+
   /**
    * Encapsulate the ugly casting and RuntimeException conversion in private method.
    * @param conf
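A small sketch of what the new codec configuration means for a client (hypothetical snippet; the two property names are the ones read by getCodec() and getDefaultCodec() above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CodecConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Left unset, "hbase.client.rpc.codec" falls back to getDefaultCodec(), i.e.
    // KeyValueCodec, and cells travel out-of-band in cellblocks.
    // Set to the empty string, getCodec() returns null: the connection header
    // advertises no cellblock codec and requests/responses go as pure protobuf.
    conf.set("hbase.client.rpc.codec", "");
    // To change the fallback instead, override the default-codec key, e.g.:
    // conf.set("hbase.client.default.rpc.codec", "");
  }
}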
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
index 4e59cb7..c290c8f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRespo
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ActionResult;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse;
@@ -277,36 +276,41 @@ public final class ResponseConverter {
    */
   public static Result[] getResults(CellScanner cellScanner, ScanResponse response)
   throws IOException {
-    if (response == null || cellScanner == null) return null;
-    ResultCellMeta resultCellMeta = response.getResultCellMeta();
-    if (resultCellMeta == null) return null;
-    int noOfResults = resultCellMeta.getCellsLengthCount();
+    if (response == null) return null;
+    int noOfResults = response.getResultsCount();
     Result[] results = new Result[noOfResults];
     for (int i = 0; i < noOfResults; i++) {
-      int noOfCells = resultCellMeta.getCellsLength(i);
-      List cells = new ArrayList(noOfCells);
-      for (int j = 0; j < noOfCells; j++) {
-        try {
-          if (cellScanner.advance() == false) {
-            // We are not able to retrieve the exact number of cells which ResultCellMeta says us.
+      if (cellScanner != null) {
+        // Cells are out in cellblocks. Group them up again as Results. How many to read at a
+        // time will be found in getCellsLength -- length here is how many Cells are in the i'th Result
+        int noOfCells = response.getCellsLength(i);
+        List cells = new ArrayList(noOfCells);
+        for (int j = 0; j < noOfCells; j++) {
+          try {
+            if (cellScanner.advance() == false) {
+              // We are not able to retrieve the exact number of cells the response tells us to expect.
+              // We have to scan for the same results again. Throwing DNRIOE as a client retry on the
+              // same scanner will result in OutOfOrderScannerNextException
+              String msg = "Results sent from server=" + noOfResults + ". But only got " + i
+                + " results completely at client. Resetting the scanner to scan again.";
+              LOG.error(msg);
+              throw new DoNotRetryIOException(msg);
+            }
+          } catch (IOException ioe) {
+            // We are getting IOE while retrieving the cells for Results.
             // We have to scan for the same results again. Throwing DNRIOE as a client retry on the
             // same scanner will result in OutOfOrderScannerNextException
-            String msg = "Results sent from server=" + noOfResults + ". But only got " + i
-                + " results completely at client. Resetting the scanner to scan again.";
-            LOG.error(msg);
-            throw new DoNotRetryIOException(msg);
-          }
-        } catch (IOException ioe) {
-          // We are getting IOE while retrieving the cells for Results.
-          // We have to scan for the same results again. Throwing DNRIOE as a client retry on the
-          // same scanner will result in OutOfOrderScannerNextException
-          LOG.error("Exception while reading cells from result."
+            LOG.error("Exception while reading cells from result."
+              + "Resetting the scanner to scan again.", ioe);
-          throw new DoNotRetryIOException("Resetting the scanner.", ioe);
+            throw new DoNotRetryIOException("Resetting the scanner.", ioe);
+          }
+          cells.add(cellScanner.current());
         }
-        cells.add(cellScanner.current());
+        results[i] = new Result(cells);
+      } else {
+        // Result is pure pb.
+        results[i] = ProtobufUtil.toResult(response.getResults(i));
       }
-      results[i] = new Result(cells);
     }
     return results;
   }
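The cellblock branch of getResults() above leans on the repeated cells_length field to regroup a flat stream of Cells into per-Result lists. A toy, self-contained version of that regrouping (hypothetical counts and cells; the real code reads both from the ScanResponse):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class RegroupExample {
  public static void main(String[] args) throws IOException {
    List<Cell> flat = new ArrayList<Cell>();
    for (int k = 0; k < 5; k++) {
      flat.add(new KeyValue(Bytes.toBytes("row" + k), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("v" + k)));
    }
    int[] cellsLength = {2, 3}; // stand-in for response.getCellsLength(i)
    CellScanner scanner = CellUtil.createCellScanner(flat);
    List<List<Cell>> perResult = new ArrayList<List<Cell>>();
    for (int i = 0; i < cellsLength.length; i++) {
      List<Cell> cells = new ArrayList<Cell>(cellsLength[i]);
      for (int j = 0; j < cellsLength[i]; j++) {
        // Mirrors the DoNotRetryIOException guard above: the cellblock must
        // hold at least as many cells as the lengths claim.
        if (!scanner.advance()) throw new IOException("cellblock ran short at result " + i);
        cells.add(scanner.current());
      }
      perResult.add(cells);
    }
    System.out.println(perResult.size() + " results regrouped");
  }
}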
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java
index 4f98d7e..3a1b152 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java
@@ -40,8 +40,8 @@ import org.apache.hadoop.hbase.Cell;
  * Typical usage:
  *
  * <pre>
- * while (scanner.next()) {
- *   Cell cell = scanner.get();
+ * while (scanner.advance()) {
+ *   Cell cell = scanner.current();
  *   // do something
  * }
  * 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index 7442a5d..b858d35 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -172,14 +172,17 @@ public final class CellUtil {
    * @return CellScanner interface over cellIterable
    */
  public static CellScanner createCellScanner(final Iterable cellIterable) {
+    if (cellIterable == null) return null;
    return createCellScanner(cellIterable.iterator());
  }

  /**
   * @param cells
-   * @return CellScanner interface over cellIterable
+   * @return CellScanner interface over cellIterable or null if cells is
+   * null
   */
  public static CellScanner createCellScanner(final Iterator cells) {
+    if (cells == null) return null;
    return new CellScanner() {
      private final Iterator iterator = cells;
      private Cell current = null;
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index d09d015..56ed04e 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -17481,19 +17481,31 @@ public final class ClientProtos {

  public interface ScanResponseOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

-    // optional .ResultCellMeta result_cell_meta = 1;
+    // repeated uint32 cells_length = 1;
    /**
-     * optional .ResultCellMeta result_cell_meta = 1;
+     * repeated uint32 cells_length = 1;
+     *
+     * <pre>
+     * If present, lists the count of Cells out in a cellblock.
+     * 
*/ - boolean hasResultCellMeta(); + java.util.List getCellsLengthList(); /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_length = 1; + * + *
+     * If present, lists the count of Cells out in a cellblock.
+     * 
*/ - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta getResultCellMeta(); + int getCellsLengthCount(); /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_length = 1; + * + *
+     * If present, lists the count of Cells out in a cellblock.
+     * 
*/ - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMetaOrBuilder getResultCellMetaOrBuilder(); + int getCellsLength(int index); // optional uint64 scanner_id = 2; /** @@ -17524,6 +17536,51 @@ public final class ClientProtos { * optional uint32 ttl = 4; */ int getTtl(); + + // repeated .Result results = 5; + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * 
+ */ + java.util.List + getResultsList(); + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result getResults(int index); + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * 
+ */ + int getResultsCount(); + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * 
+ */ + java.util.List + getResultsOrBuilderList(); + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder getResultsOrBuilder( + int index); } /** * Protobuf type {@code ScanResponse} @@ -17582,34 +17639,50 @@ public final class ClientProtos { } break; } + case 8: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + cellsLength_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + cellsLength_.add(input.readUInt32()); + break; + } case 10: { - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = resultCellMeta_.toBuilder(); + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) { + cellsLength_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; } - resultCellMeta_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(resultCellMeta_); - resultCellMeta_ = subBuilder.buildPartial(); + while (input.getBytesUntilLimit() > 0) { + cellsLength_.add(input.readUInt32()); } - bitField0_ |= 0x00000001; + input.popLimit(limit); break; } case 16: { - bitField0_ |= 0x00000002; + bitField0_ |= 0x00000001; scannerId_ = input.readUInt64(); break; } case 24: { - bitField0_ |= 0x00000004; + bitField0_ |= 0x00000002; moreResults_ = input.readBool(); break; } case 32: { - bitField0_ |= 0x00000008; + bitField0_ |= 0x00000004; ttl_ = input.readUInt32(); break; } + case 42: { + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + results_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000010; + } + results_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.PARSER, extensionRegistry)); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -17618,6 +17691,12 @@ public final class ClientProtos { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + cellsLength_ = java.util.Collections.unmodifiableList(cellsLength_); + } + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + results_ = java.util.Collections.unmodifiableList(results_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -17650,26 +17729,39 @@ public final class ClientProtos { } private int bitField0_; - // optional .ResultCellMeta result_cell_meta = 1; - public static final int RESULT_CELL_META_FIELD_NUMBER = 1; - private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta resultCellMeta_; + // repeated uint32 cells_length = 1; + public static final int CELLS_LENGTH_FIELD_NUMBER = 1; + private java.util.List cellsLength_; /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_length = 1; + * + *
+     * If present, lists the count of Cells out in a cellblock.
+     * 
*/ - public boolean hasResultCellMeta() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List + getCellsLengthList() { + return cellsLength_; } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_length = 1; + * + *
+     * If present, lists the count of Cells out in a cellblock.
+     * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta getResultCellMeta() { - return resultCellMeta_; + public int getCellsLengthCount() { + return cellsLength_.size(); } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_length = 1; + * + *
+     * If present, lists the count of Cells out in a cellblock.
+     * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMetaOrBuilder getResultCellMetaOrBuilder() { - return resultCellMeta_; + public int getCellsLength(int index) { + return cellsLength_.get(index); } // optional uint64 scanner_id = 2; @@ -17679,7 +17771,7 @@ public final class ClientProtos { * optional uint64 scanner_id = 2; */ public boolean hasScannerId() { - return ((bitField0_ & 0x00000002) == 0x00000002); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional uint64 scanner_id = 2; @@ -17695,7 +17787,7 @@ public final class ClientProtos { * optional bool more_results = 3; */ public boolean hasMoreResults() { - return ((bitField0_ & 0x00000004) == 0x00000004); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bool more_results = 3; @@ -17711,7 +17803,7 @@ public final class ClientProtos { * optional uint32 ttl = 4; */ public boolean hasTtl() { - return ((bitField0_ & 0x00000008) == 0x00000008); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional uint32 ttl = 4; @@ -17720,11 +17812,68 @@ public final class ClientProtos { return ttl_; } + // repeated .Result results = 5; + public static final int RESULTS_FIELD_NUMBER = 5; + private java.util.List results_; + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * 
+ */ + public java.util.List getResultsList() { + return results_; + } + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * 
+ */ + public java.util.List + getResultsOrBuilderList() { + return results_; + } + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * 
+ */ + public int getResultsCount() { + return results_.size(); + } + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result getResults(int index) { + return results_.get(index); + } + /** + * repeated .Result results = 5; + * + *
+     * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder getResultsOrBuilder( + int index) { + return results_.get(index); + } + private void initFields() { - resultCellMeta_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.getDefaultInstance(); + cellsLength_ = java.util.Collections.emptyList(); scannerId_ = 0L; moreResults_ = false; ttl_ = 0; + results_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -17738,18 +17887,21 @@ public final class ClientProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, resultCellMeta_); + for (int i = 0; i < cellsLength_.size(); i++) { + output.writeUInt32(1, cellsLength_.get(i)); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(2, scannerId_); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBool(3, moreResults_); } - if (((bitField0_ & 0x00000008) == 0x00000008)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt32(4, ttl_); } + for (int i = 0; i < results_.size(); i++) { + output.writeMessage(5, results_.get(i)); + } getUnknownFields().writeTo(output); } @@ -17759,21 +17911,30 @@ public final class ClientProtos { if (size != -1) return size; size = 0; + { + int dataSize = 0; + for (int i = 0; i < cellsLength_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeUInt32SizeNoTag(cellsLength_.get(i)); + } + size += dataSize; + size += 1 * getCellsLengthList().size(); + } if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, resultCellMeta_); + .computeUInt64Size(2, scannerId_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, scannerId_); + .computeBoolSize(3, moreResults_); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream - .computeBoolSize(3, moreResults_); + .computeUInt32Size(4, ttl_); } - if (((bitField0_ & 0x00000008) == 0x00000008)) { + for (int i = 0; i < results_.size(); i++) { size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(4, ttl_); + .computeMessageSize(5, results_.get(i)); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -17798,11 +17959,8 @@ public final class ClientProtos { org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse) obj; boolean result = true; - result = result && (hasResultCellMeta() == other.hasResultCellMeta()); - if (hasResultCellMeta()) { - result = result && getResultCellMeta() - .equals(other.getResultCellMeta()); - } + result = result && getCellsLengthList() + .equals(other.getCellsLengthList()); result = result && (hasScannerId() == other.hasScannerId()); if (hasScannerId()) { result = result && (getScannerId() @@ -17818,6 +17976,8 @@ public final class ClientProtos { result = result && (getTtl() == other.getTtl()); } + result = result && getResultsList() + .equals(other.getResultsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -17831,9 +17991,9 @@ public final class ClientProtos { } int hash = 
41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasResultCellMeta()) { - hash = (37 * hash) + RESULT_CELL_META_FIELD_NUMBER; - hash = (53 * hash) + getResultCellMeta().hashCode(); + if (getCellsLengthCount() > 0) { + hash = (37 * hash) + CELLS_LENGTH_FIELD_NUMBER; + hash = (53 * hash) + getCellsLengthList().hashCode(); } if (hasScannerId()) { hash = (37 * hash) + SCANNER_ID_FIELD_NUMBER; @@ -17847,6 +18007,10 @@ public final class ClientProtos { hash = (37 * hash) + TTL_FIELD_NUMBER; hash = (53 * hash) + getTtl(); } + if (getResultsCount() > 0) { + hash = (37 * hash) + RESULTS_FIELD_NUMBER; + hash = (53 * hash) + getResultsList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -17954,7 +18118,7 @@ public final class ClientProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getResultCellMetaFieldBuilder(); + getResultsFieldBuilder(); } } private static Builder create() { @@ -17963,11 +18127,7 @@ public final class ClientProtos { public Builder clear() { super.clear(); - if (resultCellMetaBuilder_ == null) { - resultCellMeta_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.getDefaultInstance(); - } else { - resultCellMetaBuilder_.clear(); - } + cellsLength_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); scannerId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); @@ -17975,6 +18135,12 @@ public final class ClientProtos { bitField0_ = (bitField0_ & ~0x00000004); ttl_ = 0; bitField0_ = (bitField0_ & ~0x00000008); + if (resultsBuilder_ == null) { + results_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + } else { + resultsBuilder_.clear(); + } return this; } @@ -18003,26 +18169,32 @@ public final class ClientProtos { org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (resultCellMetaBuilder_ == null) { - result.resultCellMeta_ = resultCellMeta_; - } else { - result.resultCellMeta_ = resultCellMetaBuilder_.build(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + cellsLength_ = java.util.Collections.unmodifiableList(cellsLength_); + bitField0_ = (bitField0_ & ~0x00000001); } + result.cellsLength_ = cellsLength_; if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; + to_bitField0_ |= 0x00000001; } result.scannerId_ = scannerId_; if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; + to_bitField0_ |= 0x00000002; } result.moreResults_ = moreResults_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; + to_bitField0_ |= 0x00000004; } result.ttl_ = ttl_; + if (resultsBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { + results_ = java.util.Collections.unmodifiableList(results_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.results_ = results_; + } else { + result.results_ = resultsBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -18039,8 +18211,15 @@ public final class ClientProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse other) { if (other == 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse.getDefaultInstance()) return this; - if (other.hasResultCellMeta()) { - mergeResultCellMeta(other.getResultCellMeta()); + if (!other.cellsLength_.isEmpty()) { + if (cellsLength_.isEmpty()) { + cellsLength_ = other.cellsLength_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureCellsLengthIsMutable(); + cellsLength_.addAll(other.cellsLength_); + } + onChanged(); } if (other.hasScannerId()) { setScannerId(other.getScannerId()); @@ -18051,6 +18230,32 @@ public final class ClientProtos { if (other.hasTtl()) { setTtl(other.getTtl()); } + if (resultsBuilder_ == null) { + if (!other.results_.isEmpty()) { + if (results_.isEmpty()) { + results_ = other.results_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureResultsIsMutable(); + results_.addAll(other.results_); + } + onChanged(); + } + } else { + if (!other.results_.isEmpty()) { + if (resultsBuilder_.isEmpty()) { + resultsBuilder_.dispose(); + resultsBuilder_ = null; + results_ = other.results_; + bitField0_ = (bitField0_ & ~0x00000010); + resultsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getResultsFieldBuilder() : null; + } else { + resultsBuilder_.addAllMessages(other.results_); + } + } + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -18078,121 +18283,98 @@ public final class ClientProtos { } private int bitField0_; - // optional .ResultCellMeta result_cell_meta = 1; - private org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta resultCellMeta_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMetaOrBuilder> resultCellMetaBuilder_; + // repeated uint32 cells_length = 1; + private java.util.List cellsLength_ = java.util.Collections.emptyList(); + private void ensureCellsLengthIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + cellsLength_ = new java.util.ArrayList(cellsLength_); + bitField0_ |= 0x00000001; + } + } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_length = 1; + * + *
+       * If present, lists the count of Cells out in a cellblock.
+       * 
*/ - public boolean hasResultCellMeta() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public java.util.List + getCellsLengthList() { + return java.util.Collections.unmodifiableList(cellsLength_); } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_length = 1; + * + *
+       * If present, lists the count of Cells out in a cellblock.
+       * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta getResultCellMeta() { - if (resultCellMetaBuilder_ == null) { - return resultCellMeta_; - } else { - return resultCellMetaBuilder_.getMessage(); - } + public int getCellsLengthCount() { + return cellsLength_.size(); } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_length = 1; + * + *
+       * If present, lists the count of Cells out in a cellblock.
+       * 
*/ - public Builder setResultCellMeta(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta value) { - if (resultCellMetaBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - resultCellMeta_ = value; - onChanged(); - } else { - resultCellMetaBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; + public int getCellsLength(int index) { + return cellsLength_.get(index); } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_length = 1; + * + *
+       * If present, lists the count of Cells out in a cellblock.
+       * 
*/ - public Builder setResultCellMeta( - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder builderForValue) { - if (resultCellMetaBuilder_ == null) { - resultCellMeta_ = builderForValue.build(); - onChanged(); - } else { - resultCellMetaBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; + public Builder setCellsLength( + int index, int value) { + ensureCellsLengthIsMutable(); + cellsLength_.set(index, value); + onChanged(); return this; } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_length = 1; + * + *
+       * If present, lists the count of Cells out in a cellblock.
+       * 
*/ - public Builder mergeResultCellMeta(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta value) { - if (resultCellMetaBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - resultCellMeta_ != org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.getDefaultInstance()) { - resultCellMeta_ = - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.newBuilder(resultCellMeta_).mergeFrom(value).buildPartial(); - } else { - resultCellMeta_ = value; - } - onChanged(); - } else { - resultCellMetaBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; + public Builder addCellsLength(int value) { + ensureCellsLengthIsMutable(); + cellsLength_.add(value); + onChanged(); return this; } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_length = 1; + * + *
+       * If present, lists the count of Cells out in a cellblock.
+       * 
*/ - public Builder clearResultCellMeta() { - if (resultCellMetaBuilder_ == null) { - resultCellMeta_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.getDefaultInstance(); - onChanged(); - } else { - resultCellMetaBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); + public Builder addAllCellsLength( + java.lang.Iterable values) { + ensureCellsLengthIsMutable(); + super.addAll(values, cellsLength_); + onChanged(); return this; } /** - * optional .ResultCellMeta result_cell_meta = 1; + * repeated uint32 cells_length = 1; + * + *
+       * If present, lists the count of Cells out in a cellblock.
+       * 
*/ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder getResultCellMetaBuilder() { - bitField0_ |= 0x00000001; + public Builder clearCellsLength() { + cellsLength_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); onChanged(); - return getResultCellMetaFieldBuilder().getBuilder(); - } - /** - * optional .ResultCellMeta result_cell_meta = 1; - */ - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMetaOrBuilder getResultCellMetaOrBuilder() { - if (resultCellMetaBuilder_ != null) { - return resultCellMetaBuilder_.getMessageOrBuilder(); - } else { - return resultCellMeta_; - } - } - /** - * optional .ResultCellMeta result_cell_meta = 1; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMetaOrBuilder> - getResultCellMetaFieldBuilder() { - if (resultCellMetaBuilder_ == null) { - resultCellMetaBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMetaOrBuilder>( - resultCellMeta_, - getParentForChildren(), - isClean()); - resultCellMeta_ = null; - } - return resultCellMetaBuilder_; + return this; } // optional uint64 scanner_id = 2; @@ -18294,519 +18476,327 @@ public final class ClientProtos { return this; } - // @@protoc_insertion_point(builder_scope:ScanResponse) - } - - static { - defaultInstance = new ScanResponse(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:ScanResponse) - } - - public interface ResultCellMetaOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // repeated uint32 cells_length = 1; - /** - * repeated uint32 cells_length = 1; - */ - java.util.List getCellsLengthList(); - /** - * repeated uint32 cells_length = 1; - */ - int getCellsLengthCount(); - /** - * repeated uint32 cells_length = 1; - */ - int getCellsLength(int index); - } - /** - * Protobuf type {@code ResultCellMeta} - */ - public static final class ResultCellMeta extends - com.google.protobuf.GeneratedMessage - implements ResultCellMetaOrBuilder { - // Use ResultCellMeta.newBuilder() to construct. 
- private ResultCellMeta(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private ResultCellMeta(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final ResultCellMeta defaultInstance; - public static ResultCellMeta getDefaultInstance() { - return defaultInstance; - } - - public ResultCellMeta getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ResultCellMeta( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - cellsLength_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - cellsLength_.add(input.readUInt32()); - break; - } - case 10: { - int length = input.readRawVarint32(); - int limit = input.pushLimit(length); - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) { - cellsLength_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - while (input.getBytesUntilLimit() > 0) { - cellsLength_.add(input.readUInt32()); - } - input.popLimit(limit); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - cellsLength_ = java.util.Collections.unmodifiableList(cellsLength_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_ResultCellMeta_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_ResultCellMeta_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ResultCellMeta parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ResultCellMeta(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - // repeated uint32 cells_length = 1; - 
public static final int CELLS_LENGTH_FIELD_NUMBER = 1; - private java.util.List cellsLength_; - /** - * repeated uint32 cells_length = 1; - */ - public java.util.List - getCellsLengthList() { - return cellsLength_; - } - /** - * repeated uint32 cells_length = 1; - */ - public int getCellsLengthCount() { - return cellsLength_.size(); - } - /** - * repeated uint32 cells_length = 1; - */ - public int getCellsLength(int index) { - return cellsLength_.get(index); - } - - private void initFields() { - cellsLength_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < cellsLength_.size(); i++) { - output.writeUInt32(1, cellsLength_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - { - int dataSize = 0; - for (int i = 0; i < cellsLength_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeUInt32SizeNoTag(cellsLength_.get(i)); - } - size += dataSize; - size += 1 * getCellsLengthList().size(); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta other = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta) obj; - - boolean result = true; - result = result && getCellsLengthList() - .equals(other.getCellsLengthList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (getCellsLengthCount() > 0) { - hash = (37 * hash) + CELLS_LENGTH_FIELD_NUMBER; - hash = (53 * hash) + getCellsLengthList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom(byte[] data) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code ResultCellMeta} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMetaOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_ResultCellMeta_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_ResultCellMeta_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - 
maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - cellsLength_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.internal_static_ResultCellMeta_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.getDefaultInstance(); + // repeated .Result results = 5; + private java.util.List results_ = + java.util.Collections.emptyList(); + private void ensureResultsIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + results_ = new java.util.ArrayList(results_); + bitField0_ |= 0x00000010; + } } - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta build() { - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder> resultsBuilder_; + + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
+ */ + public java.util.List getResultsList() { + if (resultsBuilder_ == null) { + return java.util.Collections.unmodifiableList(results_); + } else { + return resultsBuilder_.getMessageList(); } - return result; } - - public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta result = new org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta(this); - int from_bitField0_ = bitField0_; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - cellsLength_ = java.util.Collections.unmodifiableList(cellsLength_); - bitField0_ = (bitField0_ & ~0x00000001); + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
+ */ + public int getResultsCount() { + if (resultsBuilder_ == null) { + return results_.size(); + } else { + return resultsBuilder_.getCount(); } - result.cellsLength_ = cellsLength_; - onBuilt(); - return result; } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta)other); + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result getResults(int index) { + if (resultsBuilder_ == null) { + return results_.get(index); } else { - super.mergeFrom(other); - return this; + return resultsBuilder_.getMessage(index); } } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta.getDefaultInstance()) return this; - if (!other.cellsLength_.isEmpty()) { - if (cellsLength_.isEmpty()) { - cellsLength_ = other.cellsLength_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureCellsLengthIsMutable(); - cellsLength_.addAll(other.cellsLength_); + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
+ */ + public Builder setResults( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result value) { + if (resultsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } + ensureResultsIsMutable(); + results_.set(index, value); onChanged(); + } else { + resultsBuilder_.setMessage(index, value); } - this.mergeUnknownFields(other.getUnknownFields()); return this; } - - public final boolean isInitialized() { - return true; + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
+ */ + public Builder setResults( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder builderForValue) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + results_.set(index, builderForValue.build()); + onChanged(); + } else { + resultsBuilder_.setMessage(index, builderForValue.build()); + } + return this; } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
+ */ + public Builder addResults(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result value) { + if (resultsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); } + ensureResultsIsMutable(); + results_.add(value); + onChanged(); + } else { + resultsBuilder_.addMessage(value); } return this; } - private int bitField0_; - - // repeated uint32 cells_length = 1; - private java.util.List cellsLength_ = java.util.Collections.emptyList(); - private void ensureCellsLengthIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - cellsLength_ = new java.util.ArrayList(cellsLength_); - bitField0_ |= 0x00000001; - } - } /** - * repeated uint32 cells_length = 1; + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
*/ - public java.util.List - getCellsLengthList() { - return java.util.Collections.unmodifiableList(cellsLength_); + public Builder addResults( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result value) { + if (resultsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureResultsIsMutable(); + results_.add(index, value); + onChanged(); + } else { + resultsBuilder_.addMessage(index, value); + } + return this; } /** - * repeated uint32 cells_length = 1; + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
*/ - public int getCellsLengthCount() { - return cellsLength_.size(); + public Builder addResults( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder builderForValue) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + results_.add(builderForValue.build()); + onChanged(); + } else { + resultsBuilder_.addMessage(builderForValue.build()); + } + return this; } /** - * repeated uint32 cells_length = 1; + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
*/ - public int getCellsLength(int index) { - return cellsLength_.get(index); + public Builder addResults( + int index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder builderForValue) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + results_.add(index, builderForValue.build()); + onChanged(); + } else { + resultsBuilder_.addMessage(index, builderForValue.build()); + } + return this; } /** - * repeated uint32 cells_length = 1; + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
*/ - public Builder setCellsLength( - int index, int value) { - ensureCellsLengthIsMutable(); - cellsLength_.set(index, value); - onChanged(); + public Builder addAllResults( + java.lang.Iterable values) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + super.addAll(values, results_); + onChanged(); + } else { + resultsBuilder_.addAllMessages(values); + } return this; } /** - * repeated uint32 cells_length = 1; + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
*/ - public Builder addCellsLength(int value) { - ensureCellsLengthIsMutable(); - cellsLength_.add(value); - onChanged(); + public Builder clearResults() { + if (resultsBuilder_ == null) { + results_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + resultsBuilder_.clear(); + } return this; } /** - * repeated uint32 cells_length = 1; + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
*/ - public Builder addAllCellsLength( - java.lang.Iterable values) { - ensureCellsLengthIsMutable(); - super.addAll(values, cellsLength_); - onChanged(); + public Builder removeResults(int index) { + if (resultsBuilder_ == null) { + ensureResultsIsMutable(); + results_.remove(index); + onChanged(); + } else { + resultsBuilder_.remove(index); + } return this; } /** - * repeated uint32 cells_length = 1; + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
*/ - public Builder clearCellsLength() { - cellsLength_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder getResultsBuilder( + int index) { + return getResultsFieldBuilder().getBuilder(index); + } + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder getResultsOrBuilder( + int index) { + if (resultsBuilder_ == null) { + return results_.get(index); } else { + return resultsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
+ */ + public java.util.List + getResultsOrBuilderList() { + if (resultsBuilder_ != null) { + return resultsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(results_); + } + } + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder addResultsBuilder() { + return getResultsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.getDefaultInstance()); + } + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder addResultsBuilder( + int index) { + return getResultsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.getDefaultInstance()); + } + /** + * repeated .Result results = 5; + * + *
+       * If cells are not carried in an accompanying cellblock, then they are pb'd here.
+       * 
+ */ + public java.util.List + getResultsBuilderList() { + return getResultsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder> + getResultsFieldBuilder() { + if (resultsBuilder_ == null) { + resultsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrBuilder>( + results_, + ((bitField0_ & 0x00000010) == 0x00000010), + getParentForChildren(), + isClean()); + results_ = null; + } + return resultsBuilder_; } - // @@protoc_insertion_point(builder_scope:ResultCellMeta) + // @@protoc_insertion_point(builder_scope:ScanResponse) } static { - defaultInstance = new ResultCellMeta(true); + defaultInstance = new ScanResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:ResultCellMeta) + // @@protoc_insertion_point(class_scope:ScanResponse) } public interface BulkLoadHFileRequestOrBuilder @@ -27429,11 +27419,6 @@ public final class ClientProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_ScanResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor - internal_static_ResultCellMeta_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_ResultCellMeta_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor internal_static_BulkLoadHFileRequest_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -27551,42 +27536,41 @@ public final class ClientProtos { "egion\030\001 \001(\0132\020.RegionSpecifier\022\023\n\004scan\030\002 " + "\001(\0132\005.Scan\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number" + "_of_rows\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n" + - "\rnext_call_seq\030\006 \001(\004\"p\n\014ScanResponse\022)\n\020", - "result_cell_meta\030\001 \001(\0132\017.ResultCellMeta\022" + - "\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(" + - "\010\022\013\n\003ttl\030\004 \001(\r\"&\n\016ResultCellMeta\022\024\n\014cell" + - "s_length\030\001 \003(\r\"\263\001\n\024BulkLoadHFileRequest\022" + - " \n\006region\030\001 \002(\0132\020.RegionSpecifier\0225\n\013fam" + - "ily_path\030\002 \003(\0132 .BulkLoadHFileRequest.Fa" + - "milyPath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFami" + - "lyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025" + - "BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n" + - "\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014", - "service_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022" + - "\017\n\007request\030\004 \002(\014\"d\n\031CoprocessorServiceRe" + - "quest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022" + - "%\n\004call\030\002 \002(\0132\027.CoprocessorServiceCall\"]" + - "\n\032CoprocessorServiceResponse\022 \n\006region\030\001" + - " \002(\0132\020.RegionSpecifier\022\035\n\005value\030\002 \002(\0132\016." 
+ - "NameBytesPair\"B\n\013MultiAction\022 \n\010mutation" + - "\030\001 \001(\0132\016.MutationProto\022\021\n\003get\030\002 \001(\0132\004.Ge" + - "t\"I\n\014ActionResult\022\026\n\005value\030\001 \001(\0132\007.Resul" + - "t\022!\n\texception\030\002 \001(\0132\016.NameBytesPair\"^\n\014", - "MultiRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpe" + - "cifier\022\034\n\006action\030\002 \003(\0132\014.MultiAction\022\016\n\006" + - "atomic\030\003 \001(\010\".\n\rMultiResponse\022\035\n\006result\030" + - "\001 \003(\0132\r.ActionResult2\342\002\n\rClientService\022 " + - "\n\003Get\022\013.GetRequest\032\014.GetResponse\022/\n\010Mult" + - "iGet\022\020.MultiGetRequest\032\021.MultiGetRespons" + - "e\022)\n\006Mutate\022\016.MutateRequest\032\017.MutateResp" + - "onse\022#\n\004Scan\022\014.ScanRequest\032\r.ScanRespons" + - "e\022>\n\rBulkLoadHFile\022\025.BulkLoadHFileReques" + - "t\032\026.BulkLoadHFileResponse\022F\n\013ExecService", - "\022\032.CoprocessorServiceRequest\032\033.Coprocess" + - "orServiceResponse\022&\n\005Multi\022\r.MultiReques" + - "t\032\016.MultiResponseBB\n*org.apache.hadoop.h" + - "base.protobuf.generatedB\014ClientProtosH\001\210" + - "\001\001\240\001\001" + "\rnext_call_seq\030\006 \001(\004\"u\n\014ScanResponse\022\024\n\014", + "cells_length\030\001 \003(\r\022\022\n\nscanner_id\030\002 \001(\004\022\024" + + "\n\014more_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022\030\n\007res" + + "ults\030\005 \003(\0132\007.Result\"\263\001\n\024BulkLoadHFileReq" + + "uest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\0225" + + "\n\013family_path\030\002 \003(\0132 .BulkLoadHFileReque" + + "st.FamilyPath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n" + + "\nFamilyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(" + + "\t\"\'\n\025BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002" + + "(\010\"a\n\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(" + + "\014\022\024\n\014service_name\030\002 \002(\t\022\023\n\013method_name\030\003", + " \002(\t\022\017\n\007request\030\004 \002(\014\"d\n\031CoprocessorServ" + + "iceRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpeci" + + "fier\022%\n\004call\030\002 \002(\0132\027.CoprocessorServiceC" + + "all\"]\n\032CoprocessorServiceResponse\022 \n\006reg" + + "ion\030\001 \002(\0132\020.RegionSpecifier\022\035\n\005value\030\002 \002" + + "(\0132\016.NameBytesPair\"B\n\013MultiAction\022 \n\010mut" + + "ation\030\001 \001(\0132\016.MutationProto\022\021\n\003get\030\002 \001(\013" + + "2\004.Get\"I\n\014ActionResult\022\026\n\005value\030\001 \001(\0132\007." 
+ + "Result\022!\n\texception\030\002 \001(\0132\016.NameBytesPai" + + "r\"^\n\014MultiRequest\022 \n\006region\030\001 \002(\0132\020.Regi", + "onSpecifier\022\034\n\006action\030\002 \003(\0132\014.MultiActio" + + "n\022\016\n\006atomic\030\003 \001(\010\".\n\rMultiResponse\022\035\n\006re" + + "sult\030\001 \003(\0132\r.ActionResult2\342\002\n\rClientServ" + + "ice\022 \n\003Get\022\013.GetRequest\032\014.GetResponse\022/\n" + + "\010MultiGet\022\020.MultiGetRequest\032\021.MultiGetRe" + + "sponse\022)\n\006Mutate\022\016.MutateRequest\032\017.Mutat" + + "eResponse\022#\n\004Scan\022\014.ScanRequest\032\r.ScanRe" + + "sponse\022>\n\rBulkLoadHFile\022\025.BulkLoadHFileR" + + "equest\032\026.BulkLoadHFileResponse\022F\n\013ExecSe" + + "rvice\022\032.CoprocessorServiceRequest\032\033.Copr", + "ocessorServiceResponse\022&\n\005Multi\022\r.MultiR" + + "equest\032\016.MultiResponseBB\n*org.apache.had" + + "oop.hbase.protobuf.generatedB\014ClientProt" + + "osH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -27688,15 +27672,9 @@ public final class ClientProtos { internal_static_ScanResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ScanResponse_descriptor, - new java.lang.String[] { "ResultCellMeta", "ScannerId", "MoreResults", "Ttl", }); - internal_static_ResultCellMeta_descriptor = - getDescriptor().getMessageTypes().get(14); - internal_static_ResultCellMeta_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_ResultCellMeta_descriptor, - new java.lang.String[] { "CellsLength", }); + new java.lang.String[] { "CellsLength", "ScannerId", "MoreResults", "Ttl", "Results", }); internal_static_BulkLoadHFileRequest_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(14); internal_static_BulkLoadHFileRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BulkLoadHFileRequest_descriptor, @@ -27708,49 +27686,49 @@ public final class ClientProtos { internal_static_BulkLoadHFileRequest_FamilyPath_descriptor, new java.lang.String[] { "Family", "Path", }); internal_static_BulkLoadHFileResponse_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(15); internal_static_BulkLoadHFileResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BulkLoadHFileResponse_descriptor, new java.lang.String[] { "Loaded", }); internal_static_CoprocessorServiceCall_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(16); internal_static_CoprocessorServiceCall_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CoprocessorServiceCall_descriptor, new java.lang.String[] { "Row", "ServiceName", "MethodName", "Request", }); internal_static_CoprocessorServiceRequest_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(17); internal_static_CoprocessorServiceRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CoprocessorServiceRequest_descriptor, new java.lang.String[] { "Region", "Call", }); internal_static_CoprocessorServiceResponse_descriptor = - getDescriptor().getMessageTypes().get(19); + 
getDescriptor().getMessageTypes().get(18); internal_static_CoprocessorServiceResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CoprocessorServiceResponse_descriptor, new java.lang.String[] { "Region", "Value", }); internal_static_MultiAction_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(19); internal_static_MultiAction_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MultiAction_descriptor, new java.lang.String[] { "Mutation", "Get", }); internal_static_ActionResult_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(20); internal_static_ActionResult_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ActionResult_descriptor, new java.lang.String[] { "Value", "Exception", }); internal_static_MultiRequest_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(21); internal_static_MultiRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MultiRequest_descriptor, new java.lang.String[] { "Region", "Action", "Atomic", }); internal_static_MultiResponse_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(22); internal_static_MultiResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MultiResponse_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java index 0df4e51..89a3ffa 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java @@ -730,31 +730,31 @@ public final class RPCProtos { com.google.protobuf.ByteString getServiceNameBytes(); - // optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + // optional string cell_block_codec_class = 3; /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
      * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-     * if cannot deal.
+     * if it cannot deal.  Null means no codec'ing going on so we pb everything all the time (SLOW!!!)
      * 
*/ boolean hasCellBlockCodecClass(); /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
      * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-     * if cannot deal.
+     * if it cannot deal.  Null means no codec'ing going on so we pb everything all the time (SLOW!!!)
      * 
*/ java.lang.String getCellBlockCodecClass(); /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
      * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-     * if cannot deal.
+     * if it cannot deal.  Null means no codec'ing going on so we pb everything all the time (SLOW!!!)
      * 
*/ com.google.protobuf.ByteString @@ -766,7 +766,7 @@ public final class RPCProtos { * *
      * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-     * Class must implement hadoop's CompressionCodec Interface
+     * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
      * 
*/ boolean hasCellBlockCompressorClass(); @@ -775,7 +775,7 @@ public final class RPCProtos { * *
      * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-     * Class must implement hadoop's CompressionCodec Interface
+     * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
      * 
*/ java.lang.String getCellBlockCompressorClass(); @@ -784,7 +784,7 @@ public final class RPCProtos { * *
      * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-     * Class must implement hadoop's CompressionCodec Interface
+     * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
      * 
*/ com.google.protobuf.ByteString @@ -978,26 +978,26 @@ public final class RPCProtos { } } - // optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + // optional string cell_block_codec_class = 3; public static final int CELL_BLOCK_CODEC_CLASS_FIELD_NUMBER = 3; private java.lang.Object cellBlockCodecClass_; /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
      * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-     * if cannot deal.
+     * if it cannot deal.  Null means no codec'ing going on so we pb everything all the time (SLOW!!!)
      * 
*/ public boolean hasCellBlockCodecClass() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
      * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-     * if cannot deal.
+     * if it cannot deal.  Null means no codec'ing going on so we pb everything all the time (SLOW!!!)
      * 
*/ public java.lang.String getCellBlockCodecClass() { @@ -1015,11 +1015,11 @@ public final class RPCProtos { } } /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
      * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-     * if cannot deal.
+     * if it cannot deal.  Null means no codec'ing going on so we pb everything all the time (SLOW!!!)
      * 
*/ public com.google.protobuf.ByteString @@ -1044,7 +1044,7 @@ public final class RPCProtos { * *
      * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-     * Class must implement hadoop's CompressionCodec Interface
+     * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
      * 
*/ public boolean hasCellBlockCompressorClass() { @@ -1055,7 +1055,7 @@ public final class RPCProtos { * *
      * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-     * Class must implement hadoop's CompressionCodec Interface
+     * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
      * 
*/ public java.lang.String getCellBlockCompressorClass() { @@ -1077,7 +1077,7 @@ public final class RPCProtos { * *
      * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-     * Class must implement hadoop's CompressionCodec Interface
+     * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
      * 
*/ public com.google.protobuf.ByteString @@ -1097,7 +1097,7 @@ public final class RPCProtos { private void initFields() { userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); serviceName_ = ""; - cellBlockCodecClass_ = "org.apache.hadoop.hbase.codec.KeyValueCodec"; + cellBlockCodecClass_ = ""; cellBlockCompressorClass_ = ""; } private byte memoizedIsInitialized = -1; @@ -1349,7 +1349,7 @@ public final class RPCProtos { bitField0_ = (bitField0_ & ~0x00000001); serviceName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); - cellBlockCodecClass_ = "org.apache.hadoop.hbase.codec.KeyValueCodec"; + cellBlockCodecClass_ = ""; bitField0_ = (bitField0_ & ~0x00000004); cellBlockCompressorClass_ = ""; bitField0_ = (bitField0_ & ~0x00000008); @@ -1659,25 +1659,25 @@ public final class RPCProtos { return this; } - // optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; - private java.lang.Object cellBlockCodecClass_ = "org.apache.hadoop.hbase.codec.KeyValueCodec"; + // optional string cell_block_codec_class = 3; + private java.lang.Object cellBlockCodecClass_ = ""; /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
        * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-       * if cannot deal.
+       * if it cannot deal.  Null means no codec'ing going on so we pb everything all the time (SLOW!!!)
        * 
*/ public boolean hasCellBlockCodecClass() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
        * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-       * if cannot deal.
+       * if it cannot deal.  Null means no codec'ing going on so we pb everything all the time (SLOW!!!)
        * 
*/ public java.lang.String getCellBlockCodecClass() { @@ -1692,11 +1692,11 @@ public final class RPCProtos { } } /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
        * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-       * if cannot deal.
+       * if it cannot deal.  Null means no codec'ing going on so we pb everything all the time (SLOW!!!)
        * 
*/ public com.google.protobuf.ByteString @@ -1713,11 +1713,11 @@ public final class RPCProtos { } } /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
        * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-       * if cannot deal.
+       * if it cannot deal.  Null means no codec'ing going on so we pb everything all the time (SLOW!!!)
        * 
*/ public Builder setCellBlockCodecClass( @@ -1731,11 +1731,11 @@ public final class RPCProtos { return this; } /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
        * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-       * if cannot deal.
+       * if it cannot deal.  Null means no codec'ing going on so we pb everything all the time (SLOW!!!)
        * 
*/ public Builder clearCellBlockCodecClass() { @@ -1745,11 +1745,11 @@ public final class RPCProtos { return this; } /** - * optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"]; + * optional string cell_block_codec_class = 3; * *
        * Cell block codec we will use sending over optional cell blocks.  Server throws exception
-       * if cannot deal.
+       * if it cannot deal.  Null means no codec'ing going on so we pb everything all the time (SLOW!!!)
        * 
*/ public Builder setCellBlockCodecClassBytes( @@ -1770,7 +1770,7 @@ public final class RPCProtos { * *
        * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-       * Class must implement hadoop's CompressionCodec Interface
+       * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
        * 
*/ public boolean hasCellBlockCompressorClass() { @@ -1781,7 +1781,7 @@ public final class RPCProtos { * *
        * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-       * Class must implement hadoop's CompressionCodec Interface
+       * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
        * 
*/ public java.lang.String getCellBlockCompressorClass() { @@ -1800,7 +1800,7 @@ public final class RPCProtos { * *
        * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-       * Class must implement hadoop's CompressionCodec Interface
+       * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
        * 
*/ public com.google.protobuf.ByteString @@ -1821,7 +1821,7 @@ public final class RPCProtos { * *
        * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-       * Class must implement hadoop's CompressionCodec Interface
+       * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
        * 
*/ public Builder setCellBlockCompressorClass( @@ -1839,7 +1839,7 @@ public final class RPCProtos { * *
        * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-       * Class must implement hadoop's CompressionCodec Interface
+       * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
        * 
*/ public Builder clearCellBlockCompressorClass() { @@ -1853,7 +1853,7 @@ public final class RPCProtos { * *
        * Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-       * Class must implement hadoop's CompressionCodec Interface
+       * Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
        * 
*/ public Builder setCellBlockCompressorClassBytes( @@ -6002,25 +6002,24 @@ public final class RPCProtos { java.lang.String[] descriptorData = { "\n\tRPC.proto\032\rTracing.proto\032\013hbase.proto\"" + "<\n\017UserInformation\022\026\n\016effective_user\030\001 \002" + - "(\t\022\021\n\treal_user\030\002 \001(\t\"\277\001\n\020ConnectionHead" + + "(\t\022\021\n\treal_user\030\002 \001(\t\"\222\001\n\020ConnectionHead" + "er\022#\n\tuser_info\030\001 \001(\0132\020.UserInformation\022" + - "\024\n\014service_name\030\002 \001(\t\022K\n\026cell_block_code" + - "c_class\030\003 \001(\t:+org.apache.hadoop.hbase.c" + - "odec.KeyValueCodec\022#\n\033cell_block_compres" + - "sor_class\030\004 \001(\t\"\037\n\rCellBlockMeta\022\016\n\006leng" + - "th\030\001 \001(\r\"|\n\021ExceptionResponse\022\034\n\024excepti" + - "on_class_name\030\001 \001(\t\022\023\n\013stack_trace\030\002 \001(\t", - "\022\020\n\010hostname\030\003 \001(\t\022\014\n\004port\030\004 \001(\005\022\024\n\014do_n" + - "ot_retry\030\005 \001(\010\"\254\001\n\rRequestHeader\022\017\n\007call" + - "_id\030\001 \001(\r\022\035\n\ntrace_info\030\002 \001(\0132\t.RPCTInfo" + - "\022\023\n\013method_name\030\003 \001(\t\022\025\n\rrequest_param\030\004" + - " \001(\010\022\'\n\017cell_block_meta\030\005 \001(\0132\016.CellBloc" + - "kMeta\022\026\n\016effective_user\030\006 \001(\t\"q\n\016Respons" + - "eHeader\022\017\n\007call_id\030\001 \001(\r\022%\n\texception\030\002 " + - "\001(\0132\022.ExceptionResponse\022\'\n\017cell_block_me" + - "ta\030\003 \001(\0132\016.CellBlockMetaB<\n*org.apache.h" + - "adoop.hbase.protobuf.generatedB\tRPCProto", - "sH\001\240\001\001" + "\024\n\014service_name\030\002 \001(\t\022\036\n\026cell_block_code" + + "c_class\030\003 \001(\t\022#\n\033cell_block_compressor_c" + + "lass\030\004 \001(\t\"\037\n\rCellBlockMeta\022\016\n\006length\030\001 " + + "\001(\r\"|\n\021ExceptionResponse\022\034\n\024exception_cl" + + "ass_name\030\001 \001(\t\022\023\n\013stack_trace\030\002 \001(\t\022\020\n\010h" + + "ostname\030\003 \001(\t\022\014\n\004port\030\004 \001(\005\022\024\n\014do_not_re", + "try\030\005 \001(\010\"\254\001\n\rRequestHeader\022\017\n\007call_id\030\001" + + " \001(\r\022\035\n\ntrace_info\030\002 \001(\0132\t.RPCTInfo\022\023\n\013m" + + "ethod_name\030\003 \001(\t\022\025\n\rrequest_param\030\004 \001(\010\022" + + "\'\n\017cell_block_meta\030\005 \001(\0132\016.CellBlockMeta" + + "\022\026\n\016effective_user\030\006 \001(\t\"q\n\016ResponseHead" + + "er\022\017\n\007call_id\030\001 \001(\r\022%\n\texception\030\002 \001(\0132\022" + + ".ExceptionResponse\022\'\n\017cell_block_meta\030\003 " + + "\001(\0132\016.CellBlockMetaB<\n*org.apache.hadoop" + + ".hbase.protobuf.generatedB\tRPCProtosH\001\240\001" + + "\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto index ba8708f..d33c2f4 100644 --- a/hbase-protocol/src/main/protobuf/Client.proto +++ b/hbase-protocol/src/main/protobuf/Client.proto @@ -263,14 +263,13 @@ message ScanRequest { * be false. If it is not specified, it means there are more. */ message ScanResponse { - optional ResultCellMeta result_cell_meta = 1; + // If present, lists the count of Cells out in a cellblock. 
+  repeated uint32 cells_length = 1;
   optional uint64 scanner_id = 2;
   optional bool more_results = 3;
   optional uint32 ttl = 4;
-}
-
-message ResultCellMeta {
-  repeated uint32 cells_length = 1;
+  // If cells are not carried in an accompanying cellblock, then they are pb'd here.
+  repeated Result results = 5;
 }
 
 /**
diff --git a/hbase-protocol/src/main/protobuf/RPC.proto b/hbase-protocol/src/main/protobuf/RPC.proto
index 4207c7c..cdb8ff1 100644
--- a/hbase-protocol/src/main/protobuf/RPC.proto
+++ b/hbase-protocol/src/main/protobuf/RPC.proto
@@ -81,10 +81,10 @@ message ConnectionHeader {
   optional UserInformation user_info = 1;
   optional string service_name = 2;
   // Cell block codec we will use sending over optional cell blocks.  Server throws exception
-  // if cannot deal.
-  optional string cell_block_codec_class = 3 [default = "org.apache.hadoop.hbase.codec.KeyValueCodec"];
+  // if it cannot deal.  Null means no codec'ing going on so we pb everything all the time (SLOW!!!)
+  optional string cell_block_codec_class = 3;
   // Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
-  // Class must implement hadoop's CompressionCodec Interface
+  // Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
   optional string cell_block_compressor_class = 4;
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
index 7b6158d..17625d7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
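A hedged sketch of what the two ConnectionHeader shapes allowed above look like when built.
The builder methods come from the regenerated RPCProtos in this patch; the service name and
codec class values are only illustrative.

    // Header naming a codec: the server will answer this connection with cellblocks.
    RPCProtos.ConnectionHeader withCellBlocks = RPCProtos.ConnectionHeader.newBuilder()
        .setServiceName("ClientService")
        .setCellBlockCodecClass("org.apache.hadoop.hbase.codec.KeyValueCodec")
        .build();

    // Header with no codec class at all: hasCellBlockCodecClass() is false server-side,
    // codec setup is skipped, and every response comes back pure pb.
    RPCProtos.ConnectionHeader pbOnly = RPCProtos.ConnectionHeader.newBuilder()
        .setServiceName("ClientService")
        .build();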
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
index cf2687c..8995aeb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java
@@ -25,4 +25,14 @@ public interface RpcCallContext extends Delayable {
    * @throws CallerDisconnectedException
    */
   void throwExceptionIfCallerDisconnected(String regionName) throws CallerDisconnectedException;
-}
+
+  /**
+   * If the client connected and specified a codec to use, then we will use this codec to make
+   * the cellblocks we return.  If the client did not specify a codec, we assume it does not
+   * support cellblocks and will return all content protobuf'd (though it makes our serving slower).
+   * We need to ask this question per call because a server could be serving clients that
+   * support cellblocks alongside clients that do not.
+   * @return True if the client supports cellblocks, else we return all content in pb
+   */
+  boolean isClientCellBlockSupport();
+}
\ No newline at end of file
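A hedged sketch of the call pattern the new method is meant for; getCurrentCall() and
isClientCellBlockSupport() are from this patch, the surrounding handler is hypothetical.

    // Inside some hypothetical server-side service method:
    RpcCallContext call = RpcServer.getCurrentCall();
    if (call != null && call.isClientCellBlockSupport()) {
      // This client negotiated a codec: ship Cells in a trailing cellblock.
    } else {
      // No codec on this connection: pb everything (slower, but always works).
    }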
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 71ebda4..6f3613a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -59,18 +59,16 @@ import javax.security.sasl.Sasl;
 import javax.security.sasl.SaslException;
 import javax.security.sasl.SaslServer;
 
-import com.google.common.collect.Lists;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Operation;
 import org.apache.hadoop.hbase.codec.Codec;
 import org.apache.hadoop.hbase.exceptions.RegionMovedException;
@@ -113,12 +111,12 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.StringUtils;
 import org.cliffc.high_scale_lib.Counter;
-import org.cloudera.htrace.Sampler;
 import org.cloudera.htrace.Trace;
 import org.cloudera.htrace.TraceInfo;
 import org.cloudera.htrace.TraceScope;
 import org.codehaus.jackson.map.ObjectMapper;
 
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.google.protobuf.BlockingService;
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.Descriptors.MethodDescriptor;
@@ -203,7 +201,6 @@ public class RpcServer implements RpcServerInterface {
 
   protected final Configuration conf;
 
-  private int maxQueueLength;
   private int maxQueueSize;
   protected int socketSendBufferSize;
   protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
@@ -444,6 +441,11 @@
     }
 
     @Override
+    public boolean isClientCellBlockSupport() {
+      return this.connection != null && this.connection.codec != null;
+    }
+
+    @Override
     public void throwExceptionIfCallerDisconnected(String regionName)
         throws CallerDisconnectedException {
       if (!connection.channel.isOpen()) {
@@ -1542,7 +1544,9 @@
   private void setupCellBlockCodecs(final ConnectionHeader header)
   throws FatalConnectionException {
     // TODO: Plug in other supported decoders.
+    if (!header.hasCellBlockCodecClass()) return;
     String className = header.getCellBlockCodecClass();
+    if (className == null || className.length() == 0) return;
     try {
       this.codec = (Codec)Class.forName(className).newInstance();
     } catch (Exception e) {
@@ -2335,9 +2339,10 @@
   }
 
   /**
-   * Needed for delayed calls.  We need to be able to store the current call
-   * so that we can complete it later.
-   * @return Call the server is currently handling.
+   * Needed for features such as delayed calls.  We need to be able to store the current call
+   * so that we can complete it later or ask what is supported by the current
+   * ongoing call.
+   * @return An RpcCallContext backed by the currently ongoing call (gotten from a thread local)
    */
   public static RpcCallContext getCurrentCall() {
     return CurCall.get();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
index 4199c06..42bb313 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
@@ -25,12 +25,10 @@ import java.net.InetSocketAddress;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Function;
 import com.google.protobuf.BlockingService;
 import com.google.protobuf.Descriptors.MethodDescriptor;
 import com.google.protobuf.Message;
@@ -71,4 +69,4 @@
    */
   @VisibleForTesting
   void refreshAuthManager(PolicyProvider pp);
-}
+}
\ No newline at end of file
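Distilled and hedged, the rule the two guards added to setupCellBlockCodecs above encode
(Codec is hbase's codec interface, header the pb ConnectionHeader):

    // An absent or empty cell_block_codec_class is not an error: it just means this
    // connection runs pure pb, so leave this.codec null and carry on.
    if (header.hasCellBlockCodecClass()) {
      String className = header.getCellBlockCodecClass();
      if (className != null && className.length() > 0) {
        this.codec = (Codec) Class.forName(className).newInstance();
      }
    }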
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index b44fbb0..a8da03c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -59,7 +59,6 @@ import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.Chore;
-import org.apache.hadoop.hbase.DaemonThreadFactory;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -104,6 +103,7 @@ import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
 import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.RpcCallContext;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
@@ -162,7 +162,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
@@ -2955,8 +2954,8 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
    * @throws ServiceException
    */
   @Override
-  public ScanResponse scan(final RpcController controller,
-      final ScanRequest request) throws ServiceException {
+  public ScanResponse scan(final RpcController controller, final ScanRequest request)
+  throws ServiceException {
     Leases.Lease lease = null;
     String scannerName = null;
     try {
@@ -3019,7 +3018,7 @@
           if (!isLoadingCfsOnDemandSet) {
             scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault());
           }
-          byte[] hasMetrics = scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
+          scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
           region.prepareScanner(scan);
           if (region.getCoprocessorHost() != null) {
             scanner = region.getCoprocessorHost().preScannerOpen(scan);
@@ -3126,16 +3125,7 @@
             moreResults = false;
             results = null;
           } else {
-            ResultCellMeta.Builder rcmBuilder = ResultCellMeta.newBuilder();
-            List<CellScannable> cellScannables = new ArrayList<CellScannable>(results.size());
-            for (Result res : results) {
-              cellScannables.add(res);
-              rcmBuilder.addCellsLength(res.size());
-            }
-            builder.setResultCellMeta(rcmBuilder.build());
-            // TODO is this okey to assume the type and cast
-            ((PayloadCarryingRpcController) controller).setCellScanner(CellUtil
-                .createCellScanner(cellScannables));
+            formatResults(builder, results, controller);
           }
         } finally {
           // We're done. On way out re-add the above removed lease.
@@ -3183,6 +3173,26 @@
     }
   }
 
+  private void formatResults(final ScanResponse.Builder builder, final List<Result> results,
+      final RpcController controller) {
+    if (results == null || results.isEmpty()) return;
+    RpcCallContext context = RpcServer.getCurrentCall();
+    if (context != null && context.isClientCellBlockSupport()) {
+      List<CellScannable> cellScannables = new ArrayList<CellScannable>(results.size());
+      for (Result res : results) {
+        cellScannables.add(res);
+        builder.addCellsLength(res.size());
+      }
+      ((PayloadCarryingRpcController)controller).
+        setCellScanner(CellUtil.createCellScanner(cellScannables));
+    } else {
+      for (Result res: results) {
+        ClientProtos.Result pbr = ProtobufUtil.toResult(res);
+        builder.addResults(pbr);
+      }
+    }
+  }
+
   /**
    * Atomically bulk load several HFiles into an open region
    * @return true if successful, false is failed but recoverably (no action)
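formatResults above picks the wire shape per call, so a client has to mirror that choice when it
unpacks the ScanResponse. A hedged, schematic sketch: the accessors are from the regenerated
ClientProtos in this patch, while 'response', 'cellScanner' and the toResult(cells) assembly are
stand-ins for the real client plumbing.

    // Schematic mirror of formatResults on the receiving side.
    List<Result> rs = new ArrayList<Result>();
    if (response.getCellsLengthCount() > 0) {
      // Cells rode in the cellblock: carve out cells_length[i] Cells per Result.
      for (int i = 0; i < response.getCellsLengthCount(); i++) {
        int cellCount = response.getCellsLength(i);
        List<Cell> cells = new ArrayList<Cell>(cellCount);
        for (int j = 0; j < cellCount; j++) {
          if (!cellScanner.advance()) throw new IOException("Short cellblock");
          cells.add(cellScanner.current());
        }
        rs.add(toResult(cells)); // hypothetical Cell-list-to-Result assembly
      }
    } else {
      // No cellblock: Results came embedded as pb.
      for (ClientProtos.Result pbr : response.getResultsList()) {
        rs.add(ProtobufUtil.toResult(pbr));
      }
    }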
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
index a82e800..b9e8334 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -167,11 +166,9 @@
       final List<CellScannable> cellScannables = new ArrayList<CellScannable>(1);
       cellScannables.add(new Result(kvs));
       final ScanResponse.Builder builder = ScanResponse.newBuilder();
-      ResultCellMeta.Builder metaBuilder = ResultCellMeta.newBuilder();
       for (CellScannable result : cellScannables) {
-        metaBuilder.addCellsLength(((Result)result).size());
+        builder.addCellsLength(((Result)result).size());
       }
-      builder.setResultCellMeta(metaBuilder.build());
       Mockito.when(implementation.scan((RpcController) Mockito.any(), (ScanRequest) Mockito.any()))
         .thenThrow(new ServiceException("Server not running (1 of 3)"))
         .thenThrow(new ServiceException("Server not running (2 of 3)"))
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 76889d8..a7392eb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -40,9 +40,6 @@ import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.commons.lang.ArrayUtils;
@@ -51,7 +48,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -63,6 +59,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
@@ -94,7 +91,6 @@ import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.log4j.Level;
 import org.junit.After;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java
new file mode 100644
index 0000000..f91a449
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSideNoCodec.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Do some ops and prove that client and server can work w/o codecs; that we can pb all the time.
+ * Good for third-party clients or simple scripts that want to talk direct to hbase.
+ */
+public class TestFromClientSideNoCodec {
+  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    // Turn off codec use
+    TEST_UTIL.getConfiguration().set("hbase.client.default.rpc.codec", "");
+    TEST_UTIL.startMiniCluster(1);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testBasics() throws IOException {
+    final byte [] t = Bytes.toBytes("testBasics");
+    final byte [][] fs = new byte[][] {Bytes.toBytes("cf1"), Bytes.toBytes("cf2"),
+      Bytes.toBytes("cf3") };
+    HTable ht = TEST_UTIL.createTable(t, fs);
+    // Check put and get.
+    final byte [] row = Bytes.toBytes("row");
+    Put p = new Put(row);
+    for (byte [] f: fs) p.add(f, f, f);
+    ht.put(p);
+    Result r = ht.get(new Get(row));
+    int i = 0;
+    for (CellScanner cellScanner = r.cellScanner(); cellScanner.advance();) {
+      Cell cell = cellScanner.current();
+      byte [] f = fs[i++];
+      assertTrue(Bytes.toString(f),
+        Bytes.equals(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(),
+          f, 0, f.length));
+    }
+    // Check getRowOrBefore
+    byte [] f = fs[0];
+    r = ht.getRowOrBefore(row, f);
+    assertTrue(r.toString(), r.containsColumn(f, f));
+    // Check scan.
+    ResultScanner scanner = ht.getScanner(new Scan());
+    int count = 0;
+    while ((r = scanner.next()) != null) {
+      assertTrue(r.list().size() == 3);
+      count++;
+    }
+    assertTrue(count == 1);
+  }
+
+  @Test
+  public void testNoCodec() {
+    Configuration c = new Configuration();
+    c.set("hbase.client.default.rpc.codec", "");
+    String codec = RpcClient.getDefaultCodec(c);
+    assertTrue(codec == null || codec.length() == 0);
+  }
+}
\ No newline at end of file
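testNoCodec above leans on RpcClient.getDefaultCodec; a plausible shape for the lookup it
implies, hedged (the config key is the one the test sets; the fallback class name is the stock
KeyValueCodec):

    // If hbase.client.default.rpc.codec is set to the empty string, this returns ""
    // and callers treat it as "no codec", i.e. pure-pb connections.
    public static String getDefaultCodec(final Configuration c) {
      return c.get("hbase.client.default.rpc.codec",
          "org.apache.hadoop.hbase.codec.KeyValueCodec");
    }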
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java
index 6c229c4..431a713 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.codec.Codec;
 import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoRequestProto;
 import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoResponseProto;
 import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EmptyRequestProto;
@@ -113,13 +114,16 @@
       // an echo, just put them back on the controller creating a new block.  Tests our block
       // building.
       CellScanner cellScanner = pcrc.cellScanner();
-      List<Cell> list = new ArrayList<Cell>();
-      try {
-        while(cellScanner.advance()) {
-          list.add(cellScanner.current());
+      List<Cell> list = null;
+      if (cellScanner != null) {
+        list = new ArrayList<Cell>();
+        try {
+          while(cellScanner.advance()) {
+            list.add(cellScanner.current());
+          }
+        } catch (IOException e) {
+          throw new ServiceException(e);
         }
-      } catch (IOException e) {
-        throw new ServiceException(e);
       }
       cellScanner = CellUtil.createCellScanner(list);
       ((PayloadCarryingRpcController)controller).setCellScanner(cellScanner);
@@ -149,6 +153,38 @@
   }
 
   /**
+   * Ensure we do not HAVE TO HAVE a codec.
+   * @throws InterruptedException
+   * @throws IOException
+   */
+  @Test
+  public void testNoCodec() throws InterruptedException, IOException {
+    Configuration conf = HBaseConfiguration.create();
+    RpcClient client = new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT) {
+      @Override
+      Codec getCodec() {
+        return null;
+      }
+    };
+    TestRpcServer rpcServer = new TestRpcServer();
+    try {
+      rpcServer.start();
+      InetSocketAddress address = rpcServer.getListenerAddress();
+      MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo");
+      final String message = "hello";
+      EchoRequestProto param = EchoRequestProto.newBuilder().setMessage(message).build();
+      Pair<Message, CellScanner> r = client.call(md, param, null,
+        md.getOutputType().toProto(), User.getCurrent(), address, 0);
+      assertTrue(r.getSecond() == null);
+      // Silly assertion that the message is in the returned pb.
+      assertTrue(r.getFirst().toString().contains(message));
+    } finally {
+      client.stop();
+      rpcServer.stop();
+    }
+  }
+
+  /**
    * It is hard to verify the compression is actually happening under the wraps.  Hope that if
    * unsupported, we'll get an exception out of some time (meantime, have to trace it manually
    * to confirm that compression is happening down in the client and server).
@@ -160,11 +196,14 @@
   @Test
   public void testCompressCellBlock()
   throws IOException, InterruptedException, SecurityException, NoSuchMethodException {
-    // Currently, you set
-    Configuration conf = HBaseConfiguration.create();
+    Configuration conf = new Configuration(HBaseConfiguration.create());
     conf.set("hbase.client.rpc.compressor", GzipCodec.class.getCanonicalName());
+    doSimpleTest(conf, new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT));
+  }
+
+  private void doSimpleTest(final Configuration conf, final RpcClient client)
+  throws InterruptedException, IOException {
     TestRpcServer rpcServer = new TestRpcServer();
-    RpcClient client = new RpcClient(conf, HConstants.CLUSTER_ID_DEFAULT);
     List<Cell> cells = new ArrayList<Cell>();
     int count = 3;
     for (int i = 0; i < count; i++) cells.add(CELL);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 8c9e0ba..e782d35 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -82,7 +82,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
 import org.apache.hadoop.hbase.regionserver.CompactionRequestor;
@@ -380,9 +379,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
     long scannerId = request.getScannerId();
     Result result = next(scannerId);
     if (result != null) {
-      ResultCellMeta.Builder metaBuilder = ResultCellMeta.newBuilder();
-      metaBuilder.addCellsLength(result.size());
-      builder.setResultCellMeta(metaBuilder.build());
+      builder.addCellsLength(result.size());
       List<CellScannable> results = new ArrayList<CellScannable>(1);
       results.add(result);
       ((PayloadCarryingRpcController) controller).setCellScanner(CellUtil
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java
index b30dfec..8133351 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManager.java
@@ -64,7 +64,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table;
@@ -606,9 +605,7 @@
       final ScanResponse.Builder builder = ScanResponse.newBuilder();
       builder.setMoreResults(true);
-      ResultCellMeta.Builder metaBuilder = ResultCellMeta.newBuilder();
-      metaBuilder.addCellsLength(r.size());
-      builder.setResultCellMeta(metaBuilder.build());
+      builder.addCellsLength(r.size());
       final List<CellScannable> cellScannables = new ArrayList<CellScannable>(1);
       cellScannables.add(r);
       Mockito.when(implementation.scan(
@@ -1077,9 +1074,7 @@
       Result r = MetaMockingUtil.getMetaTableRowResult(REGIONINFO, SERVERNAME_A);
       final ScanResponse.Builder builder = ScanResponse.newBuilder();
       builder.setMoreResults(true);
-      ResultCellMeta.Builder metaBuilder = ResultCellMeta.newBuilder();
-      metaBuilder.addCellsLength(r.size());
-      builder.setResultCellMeta(metaBuilder.build());
+      builder.addCellsLength(r.size());
      final List<CellScannable> rows = new ArrayList<CellScannable>(1);
       rows.add(r);
       Answer<ScanResponse> ans = new Answer<ScanResponse>() {
diff --git a/src/main/docbkx/rpc.xml b/src/main/docbkx/rpc.xml
index cab396a..cbc59b7 100644
--- a/src/main/docbkx/rpc.xml
+++ b/src/main/docbkx/rpc.xml
@@ -212,9 +212,18 @@
           CellBlock Codecs
           To enable a codec other than the default KeyValueCodec, set hbase.client.rpc.codec
-          to the name of the Codec to use.  Codec must implement hbase's Codec Interface.  After connection setup,
+          to the name of the Codec class to use.  Codec must implement hbase's Codec Interface.  After connection setup,
           all passed cellblocks will be sent with this codec.  The server will return cellblocks using this same
           codec as long as the codec is on the servers' CLASSPATH (else you will get UnsupportedCellCodecException).
+          To change the default codec, set hbase.client.default.rpc.codec.
+
+          To disable cellblocks completely and go pure protobuf, set hbase.client.default.rpc.codec
+          to the empty string and do not set hbase.client.rpc.codec in your Configuration.
+          This will cause the client to connect to the server with no codec specified.
+          If a server sees no codec, it will return all responses in pure protobuf.
+          Running pure protobuf all the time will be slower than running with cellblocks.
+
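A hedged illustration of the two client setups this section describes; the property names are the
ones given above, and Configuration/HBaseConfiguration are the usual hadoop/hbase classes.

    // Cellblocks on, with an explicitly named codec:
    Configuration withCodec = HBaseConfiguration.create();
    withCodec.set("hbase.client.rpc.codec", "org.apache.hadoop.hbase.codec.KeyValueCodec");

    // Pure protobuf, no cellblocks: empty default codec, and hbase.client.rpc.codec left unset.
    Configuration pbOnly = HBaseConfiguration.create();
    pbOnly.set("hbase.client.default.rpc.codec", "");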
           Compression
           Uses hadoop's compression codecs.  To enable compressing of passed CellBlocks, set
           hbase.client.rpc.compressor