Index: src/main/java/org/apache/hadoop/hbase/avro/hbase.genavro
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/hbase.genavro (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/hbase.genavro (working copy)
@@ -1,271 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Avro protocol for a "gateway" service
- */
-@namespace("org.apache.hadoop.hbase.avro.generated")
-protocol HBase {
-
- //
- // TYPES
- //
-
- //
- // Cluster metadata
- //
- // TODO(hammer): Best way to represent java.net.InetSocketAddress?
- record AServerAddress {
- string hostname;
- string inetSocketAddress;
- int port;
- }
-
- record ARegionLoad {
- int memStoreSizeMB;
- bytes name;
- int storefileIndexSizeMB;
- int storefiles;
- int storefileSizeMB;
- int stores;
- }
-
- record AServerLoad {
- int load;
- int maxHeapMB;
- int memStoreSizeInMB;
- int numberOfRegions;
- int numberOfRequests;
- array regionsLoad;
- int storefileIndexSizeInMB;
- int storefiles;
- int storefileSizeInMB;
- int usedHeapMB;
- }
-
- record AServerInfo {
- int infoPort;
- AServerLoad load;
- AServerAddress serverAddress;
- string serverName;
- long startCode;
- }
-
- // TODO(hammer): Implement reusable Writable to Avro record converter?
- record AClusterStatus {
- double averageLoad;
- array deadServerNames;
- int deadServers;
- string hbaseVersion;
- int regionsCount;
- int requestsCount;
- array serverInfos;
- int servers;
- }
-
- //
- // Family metadata
- //
- // TODO(hammer): how to keep in sync with Java Enum?
- enum ACompressionAlgorithm {
- LZO, GZ, NONE
- }
-
- // TODO(hammer): include COLUMN_DESCRIPTOR_VERSION?
- // TODO(hammer): add new bloomfilter stuff
- record AFamilyDescriptor {
- bytes name;
- union { ACompressionAlgorithm, null } compression;
- union { int, null } maxVersions;
- union { int, null } blocksize;
- union { boolean, null } inMemory;
- union { int, null } timeToLive;
- union { boolean, null } blockCacheEnabled;
- }
-
- //
- // Table metadata
- //
- // TODO(hammer): include TABLE_DESCRIPTOR_VERSION?
- record ATableDescriptor {
- bytes name;
- union { array, null } families;
- union { long, null } maxFileSize;
- union { long, null } memStoreFlushSize;
- union { boolean, null } rootRegion;
- union { boolean, null } metaRegion;
- union { boolean, null } metaTable;
- union { boolean, null } readOnly;
- union { boolean, null } deferredLogFlush;
- }
-
- //
- // Single-Row DML (Get)
- //
- record AColumn {
- bytes family;
- union { bytes, null } qualifier;
- }
-
- record ATimeRange {
- long minStamp;
- long maxStamp;
- }
-
- // TODO(hammer): Add filter options
- record AGet {
- bytes row;
- union { array, null } columns;
- union { long, null } timestamp;
- union { ATimeRange, null } timerange;
- union { int, null } maxVersions;
- }
-
- record AResultEntry {
- bytes family;
- bytes qualifier;
- bytes value;
- long timestamp;
- }
-
- // Avro maps can't use non-string keys, so using an array for now
- record AResult {
- bytes row;
- array entries;
- }
-
- //
- // Single-Row DML (Put)
- //
- // TODO(hammer): Reuse a single KeyValue-style record for Get and Put?
- record AColumnValue {
- bytes family;
- bytes qualifier;
- bytes value;
- union { long, null } timestamp;
- }
-
- record APut {
- bytes row;
- array columnValues;
- }
-
- //
- // Single-Row DML (Delete)
- //
- // TODO(hammer): Add fields when API is rationalized (HBASE-2609)
- record ADelete {
- bytes row;
- union { array, null } columns;
- }
-
- //
- // Multi-Row DML (Scan)
- //
- record AScan {
- union { bytes, null } startRow;
- union { bytes, null } stopRow;
- union { array, null } columns;
- union { long, null } timestamp;
- union { ATimeRange, null } timerange;
- union { int, null } maxVersions;
- }
-
- //
- // ERRORS
- //
-
- /**
- * An AIOError error signals that an error occurred communicating
- * to the HBase master or a HBase region server. Also used to return
- * more general HBase error conditions.
- */
- error AIOError {
- string message;
- }
-
- /**
- * An AIllegalArgument error indicates an illegal or invalid
- * argument was passed into a procedure.
- */
- error AIllegalArgument {
- string message;
- }
-
- /**
- * An ATableExists error that a table with the specified
- * name already exists
- */
- error ATableExists {
- string message;
- }
-
- /**
- * An AMasterNotRunning error means we couldn't reach the Master.
- */
- error AMasterNotRunning {
- string message;
- }
-
- //
- // MESSAGES
- //
-
- // TODO(hammer): surgery tools
- // TODO(hammer): checkAndPut/flushCommits
- // TODO(hammer): MultiPut/Get/Delete
-
- // Cluster metadata
- string getHBaseVersion() throws AIOError;
- AClusterStatus getClusterStatus() throws AIOError;
- array listTables() throws AIOError;
-
- // Table metadata
- ATableDescriptor describeTable(bytes table) throws AIOError;
- boolean isTableEnabled(bytes table) throws AIOError;
- boolean tableExists(bytes table) throws AIOError;
-
- // Family metadata
- AFamilyDescriptor describeFamily(bytes table, bytes family) throws AIOError;
-
- // Table admin
- void createTable(ATableDescriptor table) throws AIOError, AIllegalArgument, ATableExists, AMasterNotRunning;
- void deleteTable(bytes table) throws AIOError;
- void modifyTable(bytes table, ATableDescriptor tableDescriptor) throws AIOError;
- void enableTable(bytes table) throws AIOError;
- void disableTable(bytes table) throws AIOError;
- void flush(bytes table) throws AIOError;
- void split(bytes table) throws AIOError;
-
- // Family admin
- void addFamily(bytes table, AFamilyDescriptor family) throws AIOError;
- void deleteFamily(bytes table, bytes family) throws AIOError;
- void modifyFamily(bytes table, bytes familyName, AFamilyDescriptor familyDescriptor) throws AIOError;
-
- // Single-row DML
- AResult get(bytes table, AGet get) throws AIOError;
- boolean exists(bytes table, AGet get) throws AIOError;
- void put(bytes table, APut put) throws AIOError;
- void delete(bytes table, ADelete delete) throws AIOError;
- long incrementColumnValue(bytes table, bytes row, bytes family, bytes qualifier, long amount, boolean writeToWAL) throws AIOError;
-
- // Multi-row DML (read-only)
- int scannerOpen(bytes table, AScan scan) throws AIOError;
- void scannerClose(int scannerId) throws AIOError, AIllegalArgument;
- array scannerGetRows(int scannerId, int numberOfRows) throws AIOError, AIllegalArgument;
-}
Index: src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/AvroUtil.java (working copy)
@@ -128,14 +128,14 @@
acs.averageLoad = cs.getAverageLoad();
Collection deadServerNames = cs.getDeadServerNames();
Schema stringArraySchema = Schema.createArray(Schema.create(Schema.Type.STRING));
- GenericData.Array adeadServerNames = null;
+ GenericData.Array adeadServerNames = null;
if (deadServerNames != null) {
- adeadServerNames = new GenericData.Array(deadServerNames.size(), stringArraySchema);
+ adeadServerNames = new GenericData.Array(deadServerNames.size(), stringArraySchema);
for (String deadServerName : deadServerNames) {
adeadServerNames.add(new Utf8(deadServerName));
}
} else {
- adeadServerNames = new GenericData.Array(0, stringArraySchema);
+ adeadServerNames = new GenericData.Array(0, stringArraySchema);
}
acs.deadServerNames = adeadServerNames;
acs.deadServers = cs.getDeadServers();
Index: src/main/java/org/apache/hadoop/hbase/avro/hbase.avdl
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/hbase.avdl (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/avro/hbase.avdl (revision 0)
@@ -0,0 +1,271 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Avro protocol for a "gateway" service
+ */
+@namespace("org.apache.hadoop.hbase.avro.generated")
+protocol HBase {
+
+ //
+ // TYPES
+ //
+
+ //
+ // Cluster metadata
+ //
+ // TODO(hammer): Best way to represent java.net.InetSocketAddress?
+ record AServerAddress {
+ string hostname;
+ string inetSocketAddress;
+ int port;
+ }
+
+ record ARegionLoad {
+ int memStoreSizeMB;
+ bytes name;
+ int storefileIndexSizeMB;
+ int storefiles;
+ int storefileSizeMB;
+ int stores;
+ }
+
+ record AServerLoad {
+ int load;
+ int maxHeapMB;
+ int memStoreSizeInMB;
+ int numberOfRegions;
+ int numberOfRequests;
+ array regionsLoad;
+ int storefileIndexSizeInMB;
+ int storefiles;
+ int storefileSizeInMB;
+ int usedHeapMB;
+ }
+
+ record AServerInfo {
+ int infoPort;
+ AServerLoad load;
+ AServerAddress serverAddress;
+ string serverName;
+ long startCode;
+ }
+
+ // TODO(hammer): Implement reusable Writable to Avro record converter?
+ record AClusterStatus {
+ double averageLoad;
+ array deadServerNames;
+ int deadServers;
+ string hbaseVersion;
+ int regionsCount;
+ int requestsCount;
+ array serverInfos;
+ int servers;
+ }
+
+ //
+ // Family metadata
+ //
+ // TODO(hammer): how to keep in sync with Java Enum?
+ enum ACompressionAlgorithm {
+ LZO, GZ, NONE
+ }
+
+ // TODO(hammer): include COLUMN_DESCRIPTOR_VERSION?
+ // TODO(hammer): add new bloomfilter stuff
+ record AFamilyDescriptor {
+ bytes name;
+ union { ACompressionAlgorithm, null } compression;
+ union { int, null } maxVersions;
+ union { int, null } blocksize;
+ union { boolean, null } inMemory;
+ union { int, null } timeToLive;
+ union { boolean, null } blockCacheEnabled;
+ }
+
+ //
+ // Table metadata
+ //
+ // TODO(hammer): include TABLE_DESCRIPTOR_VERSION?
+ record ATableDescriptor {
+ bytes name;
+ union { array, null } families;
+ union { long, null } maxFileSize;
+ union { long, null } memStoreFlushSize;
+ union { boolean, null } rootRegion;
+ union { boolean, null } metaRegion;
+ union { boolean, null } metaTable;
+ union { boolean, null } readOnly;
+ union { boolean, null } deferredLogFlush;
+ }
+
+ //
+ // Single-Row DML (Get)
+ //
+ record AColumn {
+ bytes family;
+ union { bytes, null } qualifier;
+ }
+
+ record ATimeRange {
+ long minStamp;
+ long maxStamp;
+ }
+
+ // TODO(hammer): Add filter options
+ record AGet {
+ bytes row;
+ union { array, null } columns;
+ union { long, null } timestamp;
+ union { ATimeRange, null } timerange;
+ union { int, null } maxVersions;
+ }
+
+ record AResultEntry {
+ bytes family;
+ bytes qualifier;
+ bytes value;
+ long timestamp;
+ }
+
+ // Avro maps can't use non-string keys, so using an array for now
+ record AResult {
+ bytes row;
+ array entries;
+ }
+
+ //
+ // Single-Row DML (Put)
+ //
+ // TODO(hammer): Reuse a single KeyValue-style record for Get and Put?
+ record AColumnValue {
+ bytes family;
+ bytes qualifier;
+ bytes value;
+ union { long, null } timestamp;
+ }
+
+ record APut {
+ bytes row;
+ array columnValues;
+ }
+
+ //
+ // Single-Row DML (Delete)
+ //
+ // TODO(hammer): Add fields when API is rationalized (HBASE-2609)
+ record ADelete {
+ bytes row;
+ union { array, null } columns;
+ }
+
+ //
+ // Multi-Row DML (Scan)
+ //
+ record AScan {
+ union { bytes, null } startRow;
+ union { bytes, null } stopRow;
+ union { array, null } columns;
+ union { long, null } timestamp;
+ union { ATimeRange, null } timerange;
+ union { int, null } maxVersions;
+ }
+
+ //
+ // ERRORS
+ //
+
+ /**
+ * An AIOError error signals that an error occurred communicating
+ * to the HBase master or a HBase region server. Also used to return
+ * more general HBase error conditions.
+ */
+ error AIOError {
+ string message;
+ }
+
+ /**
+ * An AIllegalArgument error indicates an illegal or invalid
+ * argument was passed into a procedure.
+ */
+ error AIllegalArgument {
+ string message;
+ }
+
+ /**
+ * An ATableExists error signals that a table with the specified
+ * name already exists
+ */
+ error ATableExists {
+ string message;
+ }
+
+ /**
+ * An AMasterNotRunning error means we couldn't reach the Master.
+ */
+ error AMasterNotRunning {
+ string message;
+ }
+
+ //
+ // MESSAGES
+ //
+
+ // TODO(hammer): surgery tools
+ // TODO(hammer): checkAndPut/flushCommits
+ // TODO(hammer): MultiPut/Get/Delete
+
+ // Cluster metadata
+ string getHBaseVersion() throws AIOError;
+ AClusterStatus getClusterStatus() throws AIOError;
+ array listTables() throws AIOError;
+
+ // Table metadata
+ ATableDescriptor describeTable(bytes table) throws AIOError;
+ boolean isTableEnabled(bytes table) throws AIOError;
+ boolean tableExists(bytes table) throws AIOError;
+
+ // Family metadata
+ AFamilyDescriptor describeFamily(bytes table, bytes family) throws AIOError;
+
+ // Table admin
+ void createTable(ATableDescriptor table) throws AIOError, AIllegalArgument, ATableExists, AMasterNotRunning;
+ void deleteTable(bytes table) throws AIOError;
+ void modifyTable(bytes table, ATableDescriptor tableDescriptor) throws AIOError;
+ void enableTable(bytes table) throws AIOError;
+ void disableTable(bytes table) throws AIOError;
+ void flush(bytes table) throws AIOError;
+ void split(bytes table) throws AIOError;
+
+ // Family admin
+ void addFamily(bytes table, AFamilyDescriptor family) throws AIOError;
+ void deleteFamily(bytes table, bytes family) throws AIOError;
+ void modifyFamily(bytes table, bytes familyName, AFamilyDescriptor familyDescriptor) throws AIOError;
+
+ // Single-row DML
+ AResult get(bytes table, AGet get) throws AIOError;
+ boolean exists(bytes table, AGet get) throws AIOError;
+ void put(bytes table, APut put) throws AIOError;
+ void delete(bytes table, ADelete delete) throws AIOError;
+ long incrementColumnValue(bytes table, bytes row, bytes family, bytes qualifier, long amount, boolean writeToWAL) throws AIOError;
+
+ // Multi-row DML (read-only)
+ int scannerOpen(bytes table, AScan scan) throws AIOError;
+ void scannerClose(int scannerId) throws AIOError, AIllegalArgument;
+ array scannerGetRows(int scannerId, int numberOfRows) throws AIOError, AIllegalArgument;
+}
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/AResultEntry.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/AResultEntry.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/AResultEntry.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -8,6 +13,7 @@
public java.nio.ByteBuffer value;
public long timestamp;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return family;
@@ -17,6 +23,7 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/ADelete.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/ADelete.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/ADelete.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -4,8 +9,9 @@
public class ADelete extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"ADelete\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}},\"null\"]}]}");
public java.nio.ByteBuffer row;
- public org.apache.avro.generic.GenericArray columns;
+ public java.util.List columns;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return row;
@@ -13,11 +19,12 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
case 0: row = (java.nio.ByteBuffer)value$; break;
- case 1: columns = (org.apache.avro.generic.GenericArray)value$; break;
+ case 1: columns = (java.util.List)value$; break;
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/AServerLoad.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/AServerLoad.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/AServerLoad.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -8,12 +13,13 @@
public int memStoreSizeInMB;
public int numberOfRegions;
public int numberOfRequests;
- public org.apache.avro.generic.GenericArray regionsLoad;
+ public java.util.List regionsLoad;
public int storefileIndexSizeInMB;
public int storefiles;
public int storefileSizeInMB;
public int usedHeapMB;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return load;
@@ -29,6 +35,7 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
@@ -37,7 +44,7 @@
case 2: memStoreSizeInMB = (java.lang.Integer)value$; break;
case 3: numberOfRegions = (java.lang.Integer)value$; break;
case 4: numberOfRequests = (java.lang.Integer)value$; break;
- case 5: regionsLoad = (org.apache.avro.generic.GenericArray)value$; break;
+ case 5: regionsLoad = (java.util.List)value$; break;
case 6: storefileIndexSizeInMB = (java.lang.Integer)value$; break;
case 7: storefiles = (java.lang.Integer)value$; break;
case 8: storefileSizeInMB = (java.lang.Integer)value$; break;
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/ACompressionAlgorithm.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/APut.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/APut.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/APut.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -4,8 +9,9 @@
public class APut extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"APut\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columnValues\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumnValue\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]}]}}}]}");
public java.nio.ByteBuffer row;
- public org.apache.avro.generic.GenericArray columnValues;
+ public java.util.List columnValues;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return row;
@@ -13,11 +19,12 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
case 0: row = (java.nio.ByteBuffer)value$; break;
- case 1: columnValues = (org.apache.avro.generic.GenericArray)value$; break;
+ case 1: columnValues = (java.util.List)value$; break;
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/ATimeRange.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/ATimeRange.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/ATimeRange.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -6,6 +11,7 @@
public long minStamp;
public long maxStamp;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return minStamp;
@@ -13,6 +19,7 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/AServerAddress.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/AServerAddress.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/AServerAddress.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -3,8 +8,9 @@
public class AServerAddress extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AServerAddress\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"inetSocketAddress\",\"type\":\"string\"},{\"name\":\"port\",\"type\":\"int\"}]}");
- public org.apache.avro.util.Utf8 hostname;
- public org.apache.avro.util.Utf8 inetSocketAddress;
+ public java.lang.CharSequence hostname;
+ public java.lang.CharSequence inetSocketAddress;
public int port;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
@@ -15,11 +21,12 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
- case 0: hostname = (org.apache.avro.util.Utf8)value$; break;
- case 1: inetSocketAddress = (org.apache.avro.util.Utf8)value$; break;
+ case 0: hostname = (java.lang.CharSequence)value$; break;
+ case 1: inetSocketAddress = (java.lang.CharSequence)value$; break;
case 2: port = (java.lang.Integer)value$; break;
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/AIllegalArgument.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/AIllegalArgument.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/AIllegalArgument.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -3,6 +8,7 @@
public class AIllegalArgument extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord {
public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AIllegalArgument\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}");
- public org.apache.avro.util.Utf8 message;
+ public java.lang.CharSequence message;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
@@ -11,10 +17,11 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
- case 0: message = (org.apache.avro.util.Utf8)value$; break;
+ case 0: message = (java.lang.CharSequence)value$; break;
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/AMasterNotRunning.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -3,6 +8,7 @@
public class AMasterNotRunning extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord {
public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AMasterNotRunning\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}");
- public org.apache.avro.util.Utf8 message;
+ public java.lang.CharSequence message;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
@@ -11,10 +17,11 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
- case 0: message = (org.apache.avro.util.Utf8)value$; break;
+ case 0: message = (java.lang.CharSequence)value$; break;
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/AResult.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -4,8 +9,9 @@
public class AResult extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AResult\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"entries\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AResultEntry\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":\"long\"}]}}}]}");
public java.nio.ByteBuffer row;
- public org.apache.avro.generic.GenericArray entries;
+ public java.util.List entries;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return row;
@@ -13,11 +19,12 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
case 0: row = (java.nio.ByteBuffer)value$; break;
- case 1: entries = (org.apache.avro.generic.GenericArray)value$; break;
+ case 1: entries = (java.util.List)value$; break;
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/AGet.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/AGet.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/AGet.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -4,11 +9,12 @@
public class AGet extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AGet\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}},\"null\"]},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]},{\"name\":\"timerange\",\"type\":[{\"type\":\"record\",\"name\":\"ATimeRange\",\"fields\":[{\"name\":\"minStamp\",\"type\":\"long\"},{\"name\":\"maxStamp\",\"type\":\"long\"}]},\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]}]}");
public java.nio.ByteBuffer row;
- public org.apache.avro.generic.GenericArray columns;
+ public java.util.List columns;
public java.lang.Long timestamp;
public org.apache.hadoop.hbase.avro.generated.ATimeRange timerange;
public java.lang.Integer maxVersions;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return row;
@@ -19,11 +25,12 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
case 0: row = (java.nio.ByteBuffer)value$; break;
- case 1: columns = (org.apache.avro.generic.GenericArray)value$; break;
+ case 1: columns = (java.util.List)value$; break;
case 2: timestamp = (java.lang.Long)value$; break;
case 3: timerange = (org.apache.hadoop.hbase.avro.generated.ATimeRange)value$; break;
case 4: maxVersions = (java.lang.Integer)value$; break;
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/AFamilyDescriptor.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/AFamilyDescriptor.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/AFamilyDescriptor.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -11,6 +16,7 @@
public java.lang.Integer timeToLive;
public java.lang.Boolean blockCacheEnabled;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return name;
@@ -23,6 +29,7 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/AIOError.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/AIOError.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/AIOError.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -2,6 +7,6 @@
public class AIOError extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord {
- public static final org.apache.avro.Schema SCHEMA$ =
- org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AIOError\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}");
- public org.apache.avro.util.Utf8 message;
+ public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"AIOError\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}");
+ public java.lang.CharSequence message;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
@@ -12,10 +17,11 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
- case 0: message = (org.apache.avro.util.Utf8)value$; break;
+ case 0: message = (java.lang.CharSequence)value$; break;
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/HBase.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/HBase.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/HBase.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -3,53 +8,28 @@
public interface HBase {
public static final org.apache.avro.Protocol PROTOCOL = org.apache.avro.Protocol.parse("{\"protocol\":\"HBase\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"types\":[{\"type\":\"record\",\"name\":\"AServerAddress\",\"fields\":[{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"inetSocketAddress\",\"type\":\"string\"},{\"name\":\"port\",\"type\":\"int\"}]},{\"type\":\"record\",\"name\":\"ARegionLoad\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]},{\"type\":\"record\",\"name\":\"AServerLoad\",\"fields\":[{\"name\":\"load\",\"type\":\"int\"},{\"name\":\"maxHeapMB\",\"type\":\"int\"},{\"name\":\"memStoreSizeInMB\",\"type\":\"int\"},{\"name\":\"numberOfRegions\",\"type\":\"int\"},{\"name\":\"numberOfRequests\",\"type\":\"int\"},{\"name\":\"regionsLoad\",\"type\":{\"type\":\"array\",\"items\":\"ARegionLoad\"}},{\"name\":\"storefileIndexSizeInMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeInMB\",\"type\":\"int\"},{\"name\":\"usedHeapMB\",\"type\":\"int\"}]},{\"type\":\"record\",\"name\":\"AServerInfo\",\"fields\":[{\"name\":\"infoPort\",\"type\":\"int\"},{\"name\":\"load\",\"type\":\"AServerLoad\"},{\"name\":\"serverAddress\",\"type\":\"AServerAddress\"},{\"name\":\"serverName\",\"type\":\"string\"},{\"name\":\"startCode\",\"type\":\"long\"}]},{\"type\":\"record\",\"name\":\"AClusterStatus\",\"fields\":[{\"name\":\"averageLoad\",\"type\":\"double\"},{\"name\":\"deadServerNames\",\"type\":{\"type\":\"array\",\"items\":\"string\"}},{\"name\":\"deadServers\",\"type\":\"int\"},{\"name\":\"hbaseVersion\",\"type\":\"string\"},{\"name\":\"regionsCount\",\"type\":\"int\"},{\"name\":\"requestsCount\",\"type\":\"int\"},{\"name\":\"serverInfos\",\"type\":{\"type\":\"array\",\"items\":\"ASe
rverInfo\"}},{\"name\":\"servers\",\"type\":\"int\"}]},{\"type\":\"enum\",\"name\":\"ACompressionAlgorithm\",\"symbols\":[\"LZO\",\"GZ\",\"NONE\"]},{\"type\":\"record\",\"name\":\"AFamilyDescriptor\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"compression\",\"type\":[\"ACompressionAlgorithm\",\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]},{\"name\":\"blocksize\",\"type\":[\"int\",\"null\"]},{\"name\":\"inMemory\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"timeToLive\",\"type\":[\"int\",\"null\"]},{\"name\":\"blockCacheEnabled\",\"type\":[\"boolean\",\"null\"]}]},{\"type\":\"record\",\"name\":\"ATableDescriptor\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"families\",\"type\":[{\"type\":\"array\",\"items\":\"AFamilyDescriptor\"},\"null\"]},{\"name\":\"maxFileSize\",\"type\":[\"long\",\"null\"]},{\"name\":\"memStoreFlushSize\",\"type\":[\"long\",\"null\"]},{\"name\":\"rootRegion\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"metaRegion\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"metaTable\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"readOnly\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"deferredLogFlush\",\"type\":[\"boolean\",\"null\"]}]},{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]},{\"type\":\"record\",\"name\":\"ATimeRange\",\"fields\":[{\"name\":\"minStamp\",\"type\":\"long\"},{\"name\":\"maxStamp\",\"type\":\"long\"}]},{\"type\":\"record\",\"name\":\"AGet\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":\"AColumn\"},\"null\"]},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]},{\"name\":\"timerange\",\"type\":[\"ATimeRange\",\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]}]},{\"type\":\"record\",\"name\":\"AResultEntry\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"byt
es\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":\"long\"}]},{\"type\":\"record\",\"name\":\"AResult\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"entries\",\"type\":{\"type\":\"array\",\"items\":\"AResultEntry\"}}]},{\"type\":\"record\",\"name\":\"AColumnValue\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"value\",\"type\":\"bytes\"},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]}]},{\"type\":\"record\",\"name\":\"APut\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columnValues\",\"type\":{\"type\":\"array\",\"items\":\"AColumnValue\"}}]},{\"type\":\"record\",\"name\":\"ADelete\",\"fields\":[{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":\"AColumn\"},\"null\"]}]},{\"type\":\"record\",\"name\":\"AScan\",\"fields\":[{\"name\":\"startRow\",\"type\":[\"bytes\",\"null\"]},{\"name\":\"stopRow\",\"type\":[\"bytes\",\"null\"]},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":\"AColumn\"},\"null\"]},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]},{\"name\":\"timerange\",\"type\":[\"ATimeRange\",\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]}]},{\"type\":\"error\",\"name\":\"AIOError\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]},{\"type\":\"error\",\"name\":\"AIllegalArgument\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]},{\"type\":\"error\",\"name\":\"ATableExists\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]},{\"type\":\"error\",\"name\":\"AMasterNotRunning\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}],\"messages\":{\"getHBaseVersion\":{\"request\":[],\"response\":\"string\",\"errors\":[\"AIOError\"]},\"getClusterStatus\":{\"request\":[],\"response\":\"AClusterStatus\",\"errors\":[\"AIOError\"]},\"listTables\":{\"request\":[],\"response\":{\"type\":\"array\",\"items\":\"ATableDescriptor\"},\"errors\":[
\"AIOError\"]},\"describeTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"ATableDescriptor\",\"errors\":[\"AIOError\"]},\"isTableEnabled\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"boolean\",\"errors\":[\"AIOError\"]},\"tableExists\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"boolean\",\"errors\":[\"AIOError\"]},\"describeFamily\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"family\",\"type\":\"bytes\"}],\"response\":\"AFamilyDescriptor\",\"errors\":[\"AIOError\"]},\"createTable\":{\"request\":[{\"name\":\"table\",\"type\":\"ATableDescriptor\"}],\"response\":\"null\",\"errors\":[\"AIOError\",\"AIllegalArgument\",\"ATableExists\",\"AMasterNotRunning\"]},\"deleteTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"modifyTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"tableDescriptor\",\"type\":\"ATableDescriptor\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"enableTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"disableTable\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"flush\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"split\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"addFamily\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"family\",\"type\":\"AFamilyDescriptor\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"deleteFamily\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"family\",\"type\":\"bytes\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"modifyFamily\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"familyName\",\"type\":\"bytes\"},{\"name\":\"fam
ilyDescriptor\",\"type\":\"AFamilyDescriptor\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"get\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"get\",\"type\":\"AGet\"}],\"response\":\"AResult\",\"errors\":[\"AIOError\"]},\"exists\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"get\",\"type\":\"AGet\"}],\"response\":\"boolean\",\"errors\":[\"AIOError\"]},\"put\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"put\",\"type\":\"APut\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"delete\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"delete\",\"type\":\"ADelete\"}],\"response\":\"null\",\"errors\":[\"AIOError\"]},\"incrementColumnValue\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"row\",\"type\":\"bytes\"},{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":\"bytes\"},{\"name\":\"amount\",\"type\":\"long\"},{\"name\":\"writeToWAL\",\"type\":\"boolean\"}],\"response\":\"long\",\"errors\":[\"AIOError\"]},\"scannerOpen\":{\"request\":[{\"name\":\"table\",\"type\":\"bytes\"},{\"name\":\"scan\",\"type\":\"AScan\"}],\"response\":\"int\",\"errors\":[\"AIOError\"]},\"scannerClose\":{\"request\":[{\"name\":\"scannerId\",\"type\":\"int\"}],\"response\":\"null\",\"errors\":[\"AIOError\",\"AIllegalArgument\"]},\"scannerGetRows\":{\"request\":[{\"name\":\"scannerId\",\"type\":\"int\"},{\"name\":\"numberOfRows\",\"type\":\"int\"}],\"response\":{\"type\":\"array\",\"items\":\"AResult\"},\"errors\":[\"AIOError\",\"AIllegalArgument\"]}}}");
- org.apache.avro.util.Utf8 getHBaseVersion()
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- org.apache.hadoop.hbase.avro.generated.AClusterStatus getClusterStatus()
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- org.apache.avro.generic.GenericArray listTables()
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- org.apache.hadoop.hbase.avro.generated.ATableDescriptor describeTable(java.nio.ByteBuffer table)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- boolean isTableEnabled(java.nio.ByteBuffer table)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- boolean tableExists(java.nio.ByteBuffer table)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor describeFamily(java.nio.ByteBuffer table, java.nio.ByteBuffer family)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- java.lang.Void createTable(org.apache.hadoop.hbase.avro.generated.ATableDescriptor table)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError, org.apache.hadoop.hbase.avro.generated.AIllegalArgument, org.apache.hadoop.hbase.avro.generated.ATableExists, org.apache.hadoop.hbase.avro.generated.AMasterNotRunning;
- java.lang.Void deleteTable(java.nio.ByteBuffer table)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- java.lang.Void modifyTable(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.ATableDescriptor tableDescriptor)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- java.lang.Void enableTable(java.nio.ByteBuffer table)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- java.lang.Void disableTable(java.nio.ByteBuffer table)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- java.lang.Void flush(java.nio.ByteBuffer table)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- java.lang.Void split(java.nio.ByteBuffer table)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- java.lang.Void addFamily(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor family)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- java.lang.Void deleteFamily(java.nio.ByteBuffer table, java.nio.ByteBuffer family)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- java.lang.Void modifyFamily(java.nio.ByteBuffer table, java.nio.ByteBuffer familyName, org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor familyDescriptor)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- org.apache.hadoop.hbase.avro.generated.AResult get(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AGet get)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- boolean exists(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AGet get)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- java.lang.Void put(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.APut put)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- java.lang.Void delete(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.ADelete delete)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- long incrementColumnValue(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, long amount, boolean writeToWAL)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- int scannerOpen(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AScan scan)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
- java.lang.Void scannerClose(int scannerId)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError, org.apache.hadoop.hbase.avro.generated.AIllegalArgument;
- org.apache.avro.generic.GenericArray scannerGetRows(int scannerId, int numberOfRows)
- throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError, org.apache.hadoop.hbase.avro.generated.AIllegalArgument;
+ java.lang.CharSequence getHBaseVersion() throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ org.apache.hadoop.hbase.avro.generated.AClusterStatus getClusterStatus() throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ java.util.List listTables() throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ org.apache.hadoop.hbase.avro.generated.ATableDescriptor describeTable(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ boolean isTableEnabled(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ boolean tableExists(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor describeFamily(java.nio.ByteBuffer table, java.nio.ByteBuffer family) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ java.lang.Void createTable(org.apache.hadoop.hbase.avro.generated.ATableDescriptor table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError, org.apache.hadoop.hbase.avro.generated.AIllegalArgument, org.apache.hadoop.hbase.avro.generated.ATableExists, org.apache.hadoop.hbase.avro.generated.AMasterNotRunning;
+ java.lang.Void deleteTable(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ java.lang.Void modifyTable(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.ATableDescriptor tableDescriptor) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ java.lang.Void enableTable(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ java.lang.Void disableTable(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ java.lang.Void flush(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ java.lang.Void split(java.nio.ByteBuffer table) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ java.lang.Void addFamily(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor family) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ java.lang.Void deleteFamily(java.nio.ByteBuffer table, java.nio.ByteBuffer family) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ java.lang.Void modifyFamily(java.nio.ByteBuffer table, java.nio.ByteBuffer familyName, org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor familyDescriptor) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ org.apache.hadoop.hbase.avro.generated.AResult get(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AGet get) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ boolean exists(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AGet get) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ java.lang.Void put(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.APut put) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ java.lang.Void delete(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.ADelete delete) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ long incrementColumnValue(java.nio.ByteBuffer table, java.nio.ByteBuffer row, java.nio.ByteBuffer family, java.nio.ByteBuffer qualifier, long amount, boolean writeToWAL) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ int scannerOpen(java.nio.ByteBuffer table, org.apache.hadoop.hbase.avro.generated.AScan scan) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError;
+ java.lang.Void scannerClose(int scannerId) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError, org.apache.hadoop.hbase.avro.generated.AIllegalArgument;
+ java.util.List scannerGetRows(int scannerId, int numberOfRows) throws org.apache.avro.ipc.AvroRemoteException, org.apache.hadoop.hbase.avro.generated.AIOError, org.apache.hadoop.hbase.avro.generated.AIllegalArgument;
}
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/AServerInfo.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/AServerInfo.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/AServerInfo.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -6,9 +11,10 @@
public int infoPort;
public org.apache.hadoop.hbase.avro.generated.AServerLoad load;
public org.apache.hadoop.hbase.avro.generated.AServerAddress serverAddress;
- public org.apache.avro.util.Utf8 serverName;
+ public java.lang.CharSequence serverName;
public long startCode;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return infoPort;
@@ -19,13 +25,14 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
case 0: infoPort = (java.lang.Integer)value$; break;
case 1: load = (org.apache.hadoop.hbase.avro.generated.AServerLoad)value$; break;
case 2: serverAddress = (org.apache.hadoop.hbase.avro.generated.AServerAddress)value$; break;
- case 3: serverName = (org.apache.avro.util.Utf8)value$; break;
+ case 3: serverName = (java.lang.CharSequence)value$; break;
case 4: startCode = (java.lang.Long)value$; break;
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/ARegionLoad.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/ARegionLoad.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/ARegionLoad.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -10,6 +15,7 @@
public int storefileSizeMB;
public int stores;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return memStoreSizeMB;
@@ -21,6 +27,7 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/AScan.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/AScan.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/AScan.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -5,11 +10,12 @@
public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AScan\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"startRow\",\"type\":[\"bytes\",\"null\"]},{\"name\":\"stopRow\",\"type\":[\"bytes\",\"null\"]},{\"name\":\"columns\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AColumn\",\"fields\":[{\"name\":\"family\",\"type\":\"bytes\"},{\"name\":\"qualifier\",\"type\":[\"bytes\",\"null\"]}]}},\"null\"]},{\"name\":\"timestamp\",\"type\":[\"long\",\"null\"]},{\"name\":\"timerange\",\"type\":[{\"type\":\"record\",\"name\":\"ATimeRange\",\"fields\":[{\"name\":\"minStamp\",\"type\":\"long\"},{\"name\":\"maxStamp\",\"type\":\"long\"}]},\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]}]}");
public java.nio.ByteBuffer startRow;
public java.nio.ByteBuffer stopRow;
- public org.apache.avro.generic.GenericArray columns;
+ public java.util.List columns;
public java.lang.Long timestamp;
public org.apache.hadoop.hbase.avro.generated.ATimeRange timerange;
public java.lang.Integer maxVersions;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return startRow;
@@ -21,12 +27,13 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
case 0: startRow = (java.nio.ByteBuffer)value$; break;
case 1: stopRow = (java.nio.ByteBuffer)value$; break;
- case 2: columns = (org.apache.avro.generic.GenericArray)value$; break;
+ case 2: columns = (java.util.List)value$; break;
case 3: timestamp = (java.lang.Long)value$; break;
case 4: timerange = (org.apache.hadoop.hbase.avro.generated.ATimeRange)value$; break;
case 5: maxVersions = (java.lang.Integer)value$; break;
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/AClusterStatus.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/AClusterStatus.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/AClusterStatus.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -4,14 +9,15 @@
public class AClusterStatus extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"AClusterStatus\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"averageLoad\",\"type\":\"double\"},{\"name\":\"deadServerNames\",\"type\":{\"type\":\"array\",\"items\":\"string\"}},{\"name\":\"deadServers\",\"type\":\"int\"},{\"name\":\"hbaseVersion\",\"type\":\"string\"},{\"name\":\"regionsCount\",\"type\":\"int\"},{\"name\":\"requestsCount\",\"type\":\"int\"},{\"name\":\"serverInfos\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AServerInfo\",\"fields\":[{\"name\":\"infoPort\",\"type\":\"int\"},{\"name\":\"load\",\"type\":{\"type\":\"record\",\"name\":\"AServerLoad\",\"fields\":[{\"name\":\"load\",\"type\":\"int\"},{\"name\":\"maxHeapMB\",\"type\":\"int\"},{\"name\":\"memStoreSizeInMB\",\"type\":\"int\"},{\"name\":\"numberOfRegions\",\"type\":\"int\"},{\"name\":\"numberOfRequests\",\"type\":\"int\"},{\"name\":\"regionsLoad\",\"type\":{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"ARegionLoad\",\"fields\":[{\"name\":\"memStoreSizeMB\",\"type\":\"int\"},{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"storefileIndexSizeMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeMB\",\"type\":\"int\"},{\"name\":\"stores\",\"type\":\"int\"}]}}},{\"name\":\"storefileIndexSizeInMB\",\"type\":\"int\"},{\"name\":\"storefiles\",\"type\":\"int\"},{\"name\":\"storefileSizeInMB\",\"type\":\"int\"},{\"name\":\"usedHeapMB\",\"type\":\"int\"}]}},{\"name\":\"serverAddress\",\"type\":{\"type\":\"record\",\"name\":\"AServerAddress\",\"fields\":[{\"name\":\"hostname\",\"type\":\"string\"},{\"name\":\"inetSocketAddress\",\"type\":\"string\"},{\"name\":\"port\",\"type\":\"int\"}]}},{\"name\":\"serverName\",\"type\":\"string\"},{\"name\":\"startCode\",\"type\":\"long\"}]}}},{\"name\":\"servers\",\"type\":\"int\"}]}");
public double averageLoad;
- public org.apache.avro.generic.GenericArray deadServerNames;
+ public java.util.List deadServerNames;
public int deadServers;
- public org.apache.avro.util.Utf8 hbaseVersion;
+ public java.lang.CharSequence hbaseVersion;
public int regionsCount;
public int requestsCount;
- public org.apache.avro.generic.GenericArray serverInfos;
+ public java.util.List serverInfos;
public int servers;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return averageLoad;
@@ -25,16 +31,17 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
case 0: averageLoad = (java.lang.Double)value$; break;
- case 1: deadServerNames = (org.apache.avro.generic.GenericArray)value$; break;
+ case 1: deadServerNames = (java.util.List)value$; break;
case 2: deadServers = (java.lang.Integer)value$; break;
- case 3: hbaseVersion = (org.apache.avro.util.Utf8)value$; break;
+ case 3: hbaseVersion = (java.lang.CharSequence)value$; break;
case 4: regionsCount = (java.lang.Integer)value$; break;
case 5: requestsCount = (java.lang.Integer)value$; break;
- case 6: serverInfos = (org.apache.avro.generic.GenericArray)value$; break;
+ case 6: serverInfos = (java.util.List)value$; break;
case 7: servers = (java.lang.Integer)value$; break;
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/ATableDescriptor.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/ATableDescriptor.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/ATableDescriptor.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -4,7 +9,7 @@
public class ATableDescriptor extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"record\",\"name\":\"ATableDescriptor\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"families\",\"type\":[{\"type\":\"array\",\"items\":{\"type\":\"record\",\"name\":\"AFamilyDescriptor\",\"fields\":[{\"name\":\"name\",\"type\":\"bytes\"},{\"name\":\"compression\",\"type\":[{\"type\":\"enum\",\"name\":\"ACompressionAlgorithm\",\"symbols\":[\"LZO\",\"GZ\",\"NONE\"]},\"null\"]},{\"name\":\"maxVersions\",\"type\":[\"int\",\"null\"]},{\"name\":\"blocksize\",\"type\":[\"int\",\"null\"]},{\"name\":\"inMemory\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"timeToLive\",\"type\":[\"int\",\"null\"]},{\"name\":\"blockCacheEnabled\",\"type\":[\"boolean\",\"null\"]}]}},\"null\"]},{\"name\":\"maxFileSize\",\"type\":[\"long\",\"null\"]},{\"name\":\"memStoreFlushSize\",\"type\":[\"long\",\"null\"]},{\"name\":\"rootRegion\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"metaRegion\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"metaTable\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"readOnly\",\"type\":[\"boolean\",\"null\"]},{\"name\":\"deferredLogFlush\",\"type\":[\"boolean\",\"null\"]}]}");
public java.nio.ByteBuffer name;
- public org.apache.avro.generic.GenericArray families;
+ public java.util.List families;
public java.lang.Long maxFileSize;
public java.lang.Long memStoreFlushSize;
public java.lang.Boolean rootRegion;
@@ -13,6 +18,7 @@
public java.lang.Boolean readOnly;
public java.lang.Boolean deferredLogFlush;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return name;
@@ -27,11 +33,12 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
case 0: name = (java.nio.ByteBuffer)value$; break;
- case 1: families = (org.apache.avro.generic.GenericArray)value$; break;
+ case 1: families = (java.util.List)value$; break;
case 2: maxFileSize = (java.lang.Long)value$; break;
case 3: memStoreFlushSize = (java.lang.Long)value$; break;
case 4: rootRegion = (java.lang.Boolean)value$; break;
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/ATableExists.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/ATableExists.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/ATableExists.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -3,6 +8,7 @@
public class ATableExists extends org.apache.avro.specific.SpecificExceptionBase implements org.apache.avro.specific.SpecificRecord {
public static final org.apache.avro.Schema SCHEMA$ = org.apache.avro.Schema.parse("{\"type\":\"error\",\"name\":\"ATableExists\",\"namespace\":\"org.apache.hadoop.hbase.avro.generated\",\"fields\":[{\"name\":\"message\",\"type\":\"string\"}]}");
- public org.apache.avro.util.Utf8 message;
+ public java.lang.CharSequence message;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
@@ -11,10 +17,11 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
- case 0: message = (org.apache.avro.util.Utf8)value$; break;
+ case 0: message = (java.lang.CharSequence)value$; break;
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnValue.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnValue.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/AColumnValue.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -8,6 +13,7 @@
public java.nio.ByteBuffer value;
public java.lang.Long timestamp;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return family;
@@ -17,6 +23,7 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
Index: src/main/java/org/apache/hadoop/hbase/avro/generated/AColumn.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/generated/AColumn.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/generated/AColumn.java (working copy)
@@ -1,3 +1,8 @@
+/**
+ * Autogenerated by Avro
+ *
+ * DO NOT EDIT DIRECTLY
+ */
package org.apache.hadoop.hbase.avro.generated;
@SuppressWarnings("all")
@@ -6,6 +11,7 @@
public java.nio.ByteBuffer family;
public java.nio.ByteBuffer qualifier;
public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+ // Used by DatumWriter. Applications should not call.
public java.lang.Object get(int field$) {
switch (field$) {
case 0: return family;
@@ -13,6 +19,7 @@
default: throw new org.apache.avro.AvroRuntimeException("Bad index");
}
}
+ // Used by DatumReader. Applications should not call.
@SuppressWarnings(value="unchecked")
public void put(int field$, java.lang.Object value$) {
switch (field$) {
Index: src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/AvroServer.java (working copy)
@@ -535,7 +535,6 @@
System.exit(0);
}
- // TODO(hammer): Figure out a better way to keep the server alive!
protected static void doMain(final String[] args) throws Exception {
if (args.length < 1) {
printUsageAndExit();
@@ -562,8 +561,9 @@
Log LOG = LogFactory.getLog("AvroServer");
LOG.info("starting HBase Avro server on port " + Integer.toString(port));
SpecificResponder r = new SpecificResponder(HBase.class, new HBaseImpl());
- new HttpServer(r, 9090);
- Thread.sleep(1000000);
+ HttpServer server = new HttpServer(r, port);
+ server.start();
+ server.join();
}
// TODO(hammer): Look at Cassandra's daemonization and integration with JSVC
Index: src/main/java/org/apache/hadoop/hbase/avro/package.html
===================================================================
--- src/main/java/org/apache/hadoop/hbase/avro/package.html (revision 1053415)
+++ src/main/java/org/apache/hadoop/hbase/avro/package.html (working copy)
@@ -43,8 +43,8 @@
The files were generated by running the commands:
- java -jar avro-tools-1.3.2.jar genavro hbase.genavro hbase.avpr
- java -jar avro-tools-1.3.2.jar compile protocol hbase.avro $HBASE_HOME/src/java
+ java -jar avro-tools-1.4.1.jar idl hbase.avdl hbase.avpr
+ java -jar avro-tools-1.4.1.jar compile protocol hbase.avpr $HBASE_HOME/src/main/java
Index: pom.xml
===================================================================
--- pom.xml (revision 1053415)
+++ pom.xml (working copy)
@@ -188,6 +188,13 @@
true
+
+ repository.jboss.org
+ http://repository.jboss.org/nexus/content/groups/public-jboss/
+
+ false
+
+
@@ -456,7 +463,7 @@
1.6
- 1.3.3
+ 1.4.1
1.2
1.4
3.1
@@ -538,7 +545,7 @@
${log4j.version}
- org.apache.hadoop
+ org.apache.avro
avro
${avro.version}