Index: hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java	(revision 1468620)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java	(working copy)
@@ -1161,6 +1161,9 @@
       if (tScan.isSetCaching()) {
         scan.setCaching(tScan.getCaching());
       }
+      if (tScan.isSetBatchSize()) {
+        scan.setBatch(tScan.getBatchSize());
+      }
       if (tScan.isSetColumns() && tScan.getColumns().size() != 0) {
         for(ByteBuffer column : tScan.getColumns()) {
           byte [][] famQf = KeyValue.parseColumn(getBytes(column));
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java	(revision 1468620)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/generated/TScan.java	(working copy)
@@ -42,6 +42,7 @@
   private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)4);
   private static final org.apache.thrift.protocol.TField CACHING_FIELD_DESC = new org.apache.thrift.protocol.TField("caching", org.apache.thrift.protocol.TType.I32, (short)5);
   private static final org.apache.thrift.protocol.TField FILTER_STRING_FIELD_DESC = new org.apache.thrift.protocol.TField("filterString", org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField BATCH_SIZE_FIELD_DESC = new org.apache.thrift.protocol.TField("batchSize", org.apache.thrift.protocol.TType.I32, (short)7);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -55,6 +56,7 @@
   public List<ByteBuffer> columns; // optional
   public int caching; // optional
   public ByteBuffer filterString; // optional
+  public int batchSize; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -63,7 +65,8 @@
     TIMESTAMP((short)3, "timestamp"),
     COLUMNS((short)4, "columns"),
     CACHING((short)5, "caching"),
-    FILTER_STRING((short)6, "filterString");
+    FILTER_STRING((short)6, "filterString"),
+    BATCH_SIZE((short)7, "batchSize");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -90,6 +93,8 @@
           return CACHING;
         case 6: // FILTER_STRING
           return FILTER_STRING;
+        case 7: // BATCH_SIZE
+          return BATCH_SIZE;
         default:
           return null;
       }
@@ -132,8 +137,9 @@
   // isset id assignments
   private static final int __TIMESTAMP_ISSET_ID = 0;
   private static final int __CACHING_ISSET_ID = 1;
+  private static final int __BATCHSIZE_ISSET_ID = 2;
   private byte __isset_bitfield = 0;
-  private _Fields optionals[] = {_Fields.START_ROW,_Fields.STOP_ROW,_Fields.TIMESTAMP,_Fields.COLUMNS,_Fields.CACHING,_Fields.FILTER_STRING};
+  private _Fields optionals[] = {_Fields.START_ROW,_Fields.STOP_ROW,_Fields.TIMESTAMP,_Fields.COLUMNS,_Fields.CACHING,_Fields.FILTER_STRING,_Fields.BATCH_SIZE};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -150,6 +156,8 @@
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     tmpMap.put(_Fields.FILTER_STRING, new org.apache.thrift.meta_data.FieldMetaData("filterString", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "Text")));
+    tmpMap.put(_Fields.BATCH_SIZE, new org.apache.thrift.meta_data.FieldMetaData("batchSize", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TScan.class, metaDataMap);
   }
@@ -180,6 +188,7 @@
     if (other.isSetFilterString()) {
       this.filterString = other.filterString;
     }
+    this.batchSize = other.batchSize;
   }
 
   public TScan deepCopy() {
@@ -196,6 +205,8 @@
     setCachingIsSet(false);
     this.caching = 0;
     this.filterString = null;
+    setBatchSizeIsSet(false);
+    this.batchSize = 0;
   }
 
   public byte[] getStartRow() {
@@ -385,6 +396,29 @@
     }
   }
 
+  public int getBatchSize() {
+    return this.batchSize;
+  }
+
+  public TScan setBatchSize(int batchSize) {
+    this.batchSize = batchSize;
+    setBatchSizeIsSet(true);
+    return this;
+  }
+
+  public void unsetBatchSize() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __BATCHSIZE_ISSET_ID);
+  }
+
+  /** Returns true if field batchSize is set (has been assigned a value) and false otherwise */
+  public boolean isSetBatchSize() {
+    return EncodingUtils.testBit(__isset_bitfield, __BATCHSIZE_ISSET_ID);
+  }
+
+  public void setBatchSizeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __BATCHSIZE_ISSET_ID, value);
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case START_ROW:
@@ -435,6 +469,14 @@
       }
       break;
 
+    case BATCH_SIZE:
+      if (value == null) {
+        unsetBatchSize();
+      } else {
+        setBatchSize((Integer)value);
+      }
+      break;
+
     }
   }
 
@@ -458,6 +500,9 @@
     case FILTER_STRING:
       return getFilterString();
 
+    case BATCH_SIZE:
+      return Integer.valueOf(getBatchSize());
+
     }
     throw new IllegalStateException();
   }
@@ -481,6 +526,8 @@
       return isSetCaching();
     case FILTER_STRING:
       return isSetFilterString();
+    case BATCH_SIZE:
+      return isSetBatchSize();
     }
     throw new IllegalStateException();
   }
@@ -552,6 +599,15 @@
         return false;
     }
 
+    boolean this_present_batchSize = true && this.isSetBatchSize();
+    boolean that_present_batchSize = true && that.isSetBatchSize();
+    if (this_present_batchSize || that_present_batchSize) {
+      if (!(this_present_batchSize && that_present_batchSize))
+        return false;
+      if (this.batchSize != that.batchSize)
+        return false;
+    }
+
     return true;
   }
 
@@ -628,6 +684,16 @@
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetBatchSize()).compareTo(typedOther.isSetBatchSize());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetBatchSize()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.batchSize, typedOther.batchSize);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -699,6 +765,12 @@
       }
       first = false;
     }
+    if (isSetBatchSize()) {
+      if (!first) sb.append(", ");
+      sb.append("batchSize:");
+      sb.append(this.batchSize);
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -802,6 +874,14 @@
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 7: // BATCH_SIZE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.batchSize = iprot.readI32();
+              struct.setBatchSizeIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -862,6 +942,11 @@
           oprot.writeFieldEnd();
         }
       }
+      if (struct.isSetBatchSize()) {
+        oprot.writeFieldBegin(BATCH_SIZE_FIELD_DESC);
+        oprot.writeI32(struct.batchSize);
+        oprot.writeFieldEnd();
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -898,7 +983,10 @@
       if (struct.isSetFilterString()) {
         optionals.set(5);
       }
-      oprot.writeBitSet(optionals, 6);
+      if (struct.isSetBatchSize()) {
+        optionals.set(6);
+      }
+      oprot.writeBitSet(optionals, 7);
       if (struct.isSetStartRow()) {
         oprot.writeBinary(struct.startRow);
       }
@@ -923,12 +1011,15 @@
       if (struct.isSetFilterString()) {
         oprot.writeBinary(struct.filterString);
       }
+      if (struct.isSetBatchSize()) {
+        oprot.writeI32(struct.batchSize);
+      }
     }
 
     @Override
     public void read(org.apache.thrift.protocol.TProtocol prot, TScan struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      BitSet incoming = iprot.readBitSet(6);
+      BitSet incoming = iprot.readBitSet(7);
       if (incoming.get(0)) {
         struct.startRow = iprot.readBinary();
         struct.setStartRowIsSet(true);
@@ -962,6 +1053,10 @@
         struct.filterString = iprot.readBinary();
         struct.setFilterStringIsSet(true);
       }
+      if (incoming.get(6)) {
+        struct.batchSize = iprot.readI32();
+        struct.setBatchSizeIsSet(true);
+      }
     }
   }
 
Index: hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift
===================================================================
--- hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift	(revision 1468620)
+++ hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift/Hbase.thrift	(working copy)
@@ -138,7 +138,8 @@
   3:optional i64 timestamp,
   4:optional list<Text> columns,
   5:optional i32 caching,
-  6:optional Text filterString
+  6:optional Text filterString,
+  7:optional i32 batchSize
 }
 
 //
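
For reference only (not part of the patch): a minimal client-side sketch of how the new optional batchSize field could be exercised through the thrift1 interface changed above. It assumes an HBase ThriftServer running with its default unframed binary protocol on localhost:9090, a placeholder table name "t1", and the attributes-map variant of scannerOpenWithScan from this era of Hbase.thrift; adjust names, port, and transport to your deployment.

// Illustrative sketch only -- host, port, and table name are placeholders.
import java.nio.ByteBuffer;
import java.util.List;

import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.hadoop.hbase.thrift.generated.TRowResult;
import org.apache.hadoop.hbase.thrift.generated.TScan;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class BatchSizeScanExample {
  public static void main(String[] args) throws Exception {
    TTransport transport = new TSocket("localhost", 9090);
    transport.open();
    Hbase.Client client = new Hbase.Client(new TBinaryProtocol(transport));

    TScan tScan = new TScan();
    tScan.setCaching(100);   // rows buffered per scanner RPC
    tScan.setBatchSize(10);  // new field: forwarded to Scan.setBatch() by ThriftServerRunner

    // Assumed signature: scannerOpenWithScan(tableName, scan, attributes); null = no scan attributes.
    int scannerId = client.scannerOpenWithScan(
        ByteBuffer.wrap("t1".getBytes("UTF-8")), tScan, null);
    try {
      List<TRowResult> rows;
      while (!(rows = client.scannerGetList(scannerId, 100)).isEmpty()) {
        System.out.println("fetched " + rows.size() + " row results");
      }
    } finally {
      client.scannerClose(scannerId);
      transport.close();
    }
  }
}

With batchSize set, wide rows are split server-side into multiple partial Results per Scan.setBatch() semantics, which bounds the size of each row returned to the Thrift client.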