+ private Iterator<KeyValue> valuesI;
+ private KeyValue cache;
+
+ public RowResultGenerator(final String tableName, final RowSpec rowspec,
+ final Filter filter) throws IllegalArgumentException, IOException {
+ HTablePool pool = RESTServlet.getInstance().getTablePool();
+ HTableInterface table = pool.getTable(tableName);
+ try {
+ Get get = new Get(rowspec.getRow());
+ if (rowspec.hasColumns()) {
+ for (byte[] col: rowspec.getColumns()) {
+ byte[][] split = KeyValue.parseColumn(col);
+ if (split.length == 2 && split[1].length != 0) {
+ get.addColumn(split[0], split[1]);
+ } else {
+ get.addFamily(split[0]);
+ }
+ }
+ } else {
+ // rowspec does not explicitly specify columns, return them all
+ for (HColumnDescriptor family:
+ table.getTableDescriptor().getFamilies()) {
+ get.addFamily(family.getName());
+ }
+ }
+ get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
+ get.setMaxVersions(rowspec.getMaxVersions());
+ if (filter != null) {
+ get.setFilter(filter);
+ }
+ Result result = table.get(get);
+ if (result != null && !result.isEmpty()) {
+ valuesI = result.list().iterator();
+ }
+ } finally {
+ pool.putTable(table);
+ }
+ }
+
+ public void close() {
+ }
+
+ public boolean hasNext() {
+ if (cache != null) {
+ return true;
+ }
+ if (valuesI == null) {
+ return false;
+ }
+ return valuesI.hasNext();
+ }
+
+ public KeyValue next() {
+ if (cache != null) {
+ KeyValue kv = cache;
+ cache = null;
+ return kv;
+ }
+ if (valuesI == null) {
+ return null;
+ }
+ try {
+ return valuesI.next();
+ } catch (NoSuchElementException e) {
+ return null;
+ }
+ }
+
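+ /**
+ * Hands a single KeyValue back to the generator so it will be the first
+ * value returned by the next call to {@link #next()}; callers use this
+ * when they have read one value past the point they can consume.
+ */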
+ public void putBack(KeyValue kv) {
+ this.cache = kv;
+ }
+
+ public void remove() {
+ throw new UnsupportedOperationException("remove not supported");
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/RowSpec.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/RowSpec.java
new file mode 100644
index 0000000..c577e79
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/RowSpec.java
@@ -0,0 +1,317 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.util.Collection;
+import java.util.TreeSet;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Parses a path based row/column/timestamp specification into its component
+ * elements.
+ *
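+ * For example (an illustration, not taken from the original source), a
+ * specification such as "row1,row5/info:a,info:b/1234,5678" names a start
+ * row, an end row, two columns, and a time range, while a trailing '*' on
+ * the row (e.g. "row1*") requests all rows with that prefix.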
+ *
+ */
+public class RowSpec {
+ public static final long DEFAULT_START_TIMESTAMP = 0;
+ public static final long DEFAULT_END_TIMESTAMP = Long.MAX_VALUE;
+
+ private byte[] row = HConstants.EMPTY_START_ROW;
+ private byte[] endRow = null;
+ private TreeSet<byte[]> columns =
+ new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
+ private long startTime = DEFAULT_START_TIMESTAMP;
+ private long endTime = DEFAULT_END_TIMESTAMP;
+ private int maxVersions = HColumnDescriptor.DEFAULT_VERSIONS;
+
+ public RowSpec(String path) throws IllegalArgumentException {
+ int i = 0;
+ while (path.charAt(i) == '/') {
+ i++;
+ }
+ i = parseRowKeys(path, i);
+ i = parseColumns(path, i);
+ i = parseTimestamp(path, i);
+ }
+
+ private int parseRowKeys(final String path, int i)
+ throws IllegalArgumentException {
+ StringBuilder startRow = new StringBuilder();
+ StringBuilder endRow = null;
+ try {
+ char c;
+ boolean doEndRow = false;
+ while (i < path.length() && (c = path.charAt(i)) != '/') {
+ if (c == ',') {
+ doEndRow = true;
+ i++;
+ break;
+ }
+ startRow.append(c);
+ i++;
+ }
+ i++;
+ this.row = Bytes.toBytes(startRow.toString());
+ if (doEndRow) {
+ endRow = new StringBuilder();
+ while ((c = path.charAt(i)) != '/') {
+ endRow.append(c);
+ i++;
+ }
+ i++;
+ }
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException(e);
+ }
+ // HBase does not support wildcards on row keys so we will emulate a
+ // suffix glob by synthesizing appropriate start and end row keys for
+ // table scanning
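+ // (e.g., illustratively, a start row of "abc*" becomes a scan from
+ // "abc" up to the synthesized end row "abc\xFF")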
+ if (startRow.charAt(startRow.length() - 1) == '*') {
+ if (endRow != null)
+ throw new IllegalArgumentException("invalid path: start row "+
+ "specified with wildcard");
+ this.row = Bytes.toBytes(startRow.substring(0,
+ startRow.lastIndexOf("*")));
+ this.endRow = new byte[this.row.length + 1];
+ System.arraycopy(this.row, 0, this.endRow, 0, this.row.length);
+ this.endRow[this.row.length] = (byte)255;
+ } else {
+ this.row = Bytes.toBytes(startRow.toString());
+ if (endRow != null) {
+ this.endRow = Bytes.toBytes(endRow.toString());
+ }
+ }
+ return i;
+ }
+
+ private int parseColumns(final String path, int i)
+ throws IllegalArgumentException {
+ if (i >= path.length()) {
+ return i;
+ }
+ try {
+ char c;
+ StringBuilder column = new StringBuilder();
+ boolean hasColon = false;
+ while (i < path.length() && (c = path.charAt(i)) != '/') {
+ if (c == ',') {
+ if (column.length() < 1) {
+ throw new IllegalArgumentException("invalid path");
+ }
+ if (!hasColon) {
+ column.append(':');
+ }
+ this.columns.add(Bytes.toBytes(column.toString()));
+ column = new StringBuilder();
+ hasColon = false;
+ i++;
+ continue;
+ }
+ if (c == ':') {
+ hasColon = true;
+ }
+ column.append(c);
+ i++;
+ }
+ i++;
+ // trailing list entry
+ if (column.length() > 1) {
+ if (!hasColon) {
+ column.append(':');
+ }
+ this.columns.add(Bytes.toBytes(column.toString()));
+ }
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException(e);
+ }
+ return i;
+ }
+
+ private int parseTimestamp(final String path, int i)
+ throws IllegalArgumentException {
+ if (i >= path.length()) {
+ return i;
+ }
+ long time0 = 0, time1 = 0;
+ try {
+ char c = 0;
+ StringBuilder stamp = new StringBuilder();
+ while (i < path.length()) {
+ c = path.charAt(i);
+ if (c == '/' || c == ',') {
+ break;
+ }
+ stamp.append(c);
+ i++;
+ }
+ try {
+ time0 = Long.valueOf(stamp.toString());
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException(e);
+ }
+ if (c == ',') {
+ stamp = new StringBuilder();
+ i++;
+ while (i < path.length() && ((c = path.charAt(i)) != '/')) {
+ stamp.append(c);
+ i++;
+ }
+ try {
+ time1 = Long.valueOf(stamp.toString());
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException(e);
+ }
+ }
+ if (c == '/') {
+ i++;
+ }
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException(e);
+ }
+ if (time1 != 0) {
+ startTime = time0;
+ endTime = time1;
+ } else {
+ endTime = time0;
+ }
+ return i;
+ }
+
+ public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns,
+ long startTime, long endTime, int maxVersions) {
+ this.row = startRow;
+ this.endRow = endRow;
+ if (columns != null) {
+ for (byte[] col: columns) {
+ this.columns.add(col);
+ }
+ }
+ this.startTime = startTime;
+ this.endTime = endTime;
+ this.maxVersions = maxVersions;
+ }
+
+ public RowSpec(byte[] startRow, byte[] endRow, Collection<byte[]> columns,
+ long startTime, long endTime, int maxVersions) {
+ this.row = startRow;
+ this.endRow = endRow;
+ if (columns != null) {
+ this.columns.addAll(columns);
+ }
+ this.startTime = startTime;
+ this.endTime = endTime;
+ this.maxVersions = maxVersions;
+ }
+
+ public boolean isSingleRow() {
+ return endRow == null;
+ }
+
+ public int getMaxVersions() {
+ return maxVersions;
+ }
+
+ public void setMaxVersions(final int maxVersions) {
+ this.maxVersions = maxVersions;
+ }
+
+ public boolean hasColumns() {
+ return !columns.isEmpty();
+ }
+
+ public byte[] getRow() {
+ return row;
+ }
+
+ public byte[] getStartRow() {
+ return row;
+ }
+
+ public boolean hasEndRow() {
+ return endRow != null;
+ }
+
+ public byte[] getEndRow() {
+ return endRow;
+ }
+
+ public void addColumn(final byte[] column) {
+ columns.add(column);
+ }
+
+ public byte[][] getColumns() {
+ return columns.toArray(new byte[columns.size()][]);
+ }
+
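+ /**
+ * parseTimestamp stores a lone path timestamp in endTime and leaves
+ * startTime at its default of 0, so this effectively reports whether a
+ * single timestamp, rather than a time range, was supplied.
+ */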
+ public boolean hasTimestamp() {
+ return (startTime == 0) && (endTime != Long.MAX_VALUE);
+ }
+
+ public long getTimestamp() {
+ return endTime;
+ }
+
+ public long getStartTime() {
+ return startTime;
+ }
+
+ public void setStartTime(final long startTime) {
+ this.startTime = startTime;
+ }
+
+ public long getEndTime() {
+ return endTime;
+ }
+
+ public void setEndTime(long endTime) {
+ this.endTime = endTime;
+ }
+
+ public String toString() {
+ StringBuilder result = new StringBuilder();
+ result.append("{startRow => '");
+ if (row != null) {
+ result.append(Bytes.toString(row));
+ }
+ result.append("', endRow => '");
+ if (endRow != null) {
+ result.append(Bytes.toString(endRow));
+ }
+ result.append("', columns => [");
+ for (byte[] col: columns) {
+ result.append(" '");
+ result.append(Bytes.toString(col));
+ result.append("'");
+ }
+ result.append(" ], startTime => ");
+ result.append(Long.toString(startTime));
+ result.append(", endTime => ");
+ result.append(Long.toString(endTime));
+ result.append(", maxVersions => ");
+ result.append(Integer.toString(maxVersions));
+ result.append("}");
+ return result.toString();
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/ScannerInstanceResource.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/ScannerInstanceResource.java
new file mode 100644
index 0000000..92a5b7e
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/ScannerInstanceResource.java
@@ -0,0 +1,159 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.stargate.model.CellModel;
+import org.apache.hadoop.hbase.stargate.model.CellSetModel;
+import org.apache.hadoop.hbase.stargate.model.RowModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.sun.jersey.core.util.Base64;
+
+public class ScannerInstanceResource implements Constants {
+ private static final Log LOG =
+ LogFactory.getLog(ScannerInstanceResource.class);
+
+ User user;
+ ResultGenerator generator;
+ String id;
+ int batch;
+ RESTServlet servlet;
+ CacheControl cacheControl;
+
+ public ScannerInstanceResource(User user, String table, String id,
+ ResultGenerator generator, int batch) throws IOException {
+ this.user = user;
+ this.id = id;
+ this.generator = generator;
+ this.batch = batch;
+ servlet = RESTServlet.getInstance();
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ @GET
+ @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
+ public Response get(final @Context UriInfo uriInfo) throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ CellSetModel model = new CellSetModel();
+ RowModel rowModel = null;
+ byte[] rowKey = null;
+ int count = batch;
+ do {
+ KeyValue value = null;
+ try {
+ value = generator.next();
+ } catch (IllegalStateException e) {
+ ScannerResource.delete(id);
+ throw new WebApplicationException(Response.Status.GONE);
+ }
+ if (value == null) {
+ LOG.info("generator exhausted");
+ // respond with 204 (No Content) if an empty cell set would be
+ // returned
+ if (count == batch) {
+ return Response.noContent().build();
+ }
+ break;
+ }
+ if (rowKey == null) {
+ rowKey = value.getRow();
+ rowModel = new RowModel(rowKey);
+ }
+ if (!Bytes.equals(value.getRow(), rowKey)) {
+ // the user request limit is a transaction limit, so we need to
+ // account for scanner.next()
+ if (user != null && !servlet.userRequestLimit(user, 1)) {
+ generator.putBack(value);
+ break;
+ }
+ model.addRow(rowModel);
+ rowKey = value.getRow();
+ rowModel = new RowModel(rowKey);
+ }
+ rowModel.addCell(
+ new CellModel(value.getFamily(), value.getQualifier(),
+ value.getTimestamp(), value.getValue()));
+ } while (--count > 0);
+ model.addRow(rowModel);
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ return response.build();
+ }
+
+ @GET
+ @Produces(MIMETYPE_BINARY)
+ public Response getBinary(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath() + " as " +
+ MIMETYPE_BINARY);
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ KeyValue value = generator.next();
+ if (value == null) {
+ LOG.info("generator exhausted");
+ return Response.noContent().build();
+ }
+ ResponseBuilder response = Response.ok(value.getValue());
+ response.cacheControl(cacheControl);
+ response.header("X-Row", Base64.encode(value.getRow()));
+ response.header("X-Column",
+ Base64.encode(
+ KeyValue.makeColumn(value.getFamily(), value.getQualifier())));
+ response.header("X-Timestamp", value.getTimestamp());
+ return response.build();
+ } catch (IllegalStateException e) {
+ ScannerResource.delete(id);
+ throw new WebApplicationException(Response.Status.GONE);
+ }
+ }
+
+ @DELETE
+ public Response delete(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ ScannerResource.delete(id);
+ return Response.ok().build();
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResource.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResource.java
new file mode 100644
index 0000000..41c71a3
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResource.java
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.filter.Filter;
+
+import org.apache.hadoop.hbase.stargate.User;
+import org.apache.hadoop.hbase.stargate.model.ScannerModel;
+
+public class ScannerResource implements Constants {
+
+ private static final Log LOG = LogFactory.getLog(ScannerResource.class);
+
+ static final Map<String,ScannerInstanceResource> scanners =
+ new HashMap<String,ScannerInstanceResource>();
+
+ User user;
+ String tableName;
+ String actualTableName;
+ RESTServlet servlet;
+
+ public ScannerResource(User user, String table) throws IOException {
+ if (user != null) {
+ this.user = user;
+ this.actualTableName =
+ !user.isAdmin() ? user.getName() + "." + table : table;
+ } else {
+ this.actualTableName = table;
+ }
+ this.tableName = table;
+ servlet = RESTServlet.getInstance();
+ }
+
+ static void delete(final String id) {
+ synchronized (scanners) {
+ ScannerInstanceResource instance = scanners.remove(id);
+ if (instance != null) {
+ instance.generator.close();
+ }
+ }
+ }
+
+ Response update(final ScannerModel model, final boolean replace,
+ final UriInfo uriInfo) {
+ servlet.getMetrics().incrementRequests(1);
+ byte[] endRow = model.hasEndRow() ? model.getEndRow() : null;
+ RowSpec spec = new RowSpec(model.getStartRow(), endRow,
+ model.getColumns(), model.getStartTime(), model.getEndTime(), 1);
+ try {
+ Filter filter = ScannerResultGenerator.buildFilterFromModel(model);
+ ScannerResultGenerator gen =
+ new ScannerResultGenerator(actualTableName, spec, filter);
+ String id = gen.getID();
+ ScannerInstanceResource instance =
+ new ScannerInstanceResource(user, actualTableName, id, gen,
+ model.getBatch());
+ synchronized (scanners) {
+ scanners.put(id, instance);
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("new scanner: " + id);
+ }
+ UriBuilder builder = uriInfo.getAbsolutePathBuilder();
+ URI uri = builder.path(id).build();
+ return Response.created(uri).build();
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ } catch (Exception e) {
+ throw new WebApplicationException(e, Response.Status.BAD_REQUEST);
+ }
+ }
+
+ @PUT
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
+ public Response put(final ScannerModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + uriInfo.getAbsolutePath());
+ }
+ return update(model, true, uriInfo);
+ }
+
+ @POST
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
+ public Response post(final ScannerModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("POST " + uriInfo.getAbsolutePath());
+ }
+ return update(model, false, uriInfo);
+ }
+
+ @Path("{scanner: .+}")
+ public ScannerInstanceResource getScannerInstanceResource(
+ final @PathParam("scanner") String id) {
+ synchronized (scanners) {
+ ScannerInstanceResource instance = scanners.get(id);
+ if (instance == null) {
+ throw new WebApplicationException(Response.Status.NOT_FOUND);
+ }
+ return instance;
+ }
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResultGenerator.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResultGenerator.java
new file mode 100644
index 0000000..b2f25d8
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResultGenerator.java
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.UnknownScannerException;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.HTablePool;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.stargate.model.ScannerModel;
+import org.apache.hadoop.util.StringUtils;
+
+public class ScannerResultGenerator extends ResultGenerator {
+
+ private static final Log LOG =
+ LogFactory.getLog(ScannerResultGenerator.class);
+
+ public static Filter buildFilterFromModel(final ScannerModel model)
+ throws Exception {
+ String filter = model.getFilter();
+ if (filter == null || filter.length() == 0) {
+ return null;
+ }
+ return buildFilter(filter);
+ }
+
+ private String id;
+ private Iterator<KeyValue> rowI;
+ private KeyValue cache;
+ private ResultScanner scanner;
+ private Result cached;
+
+ public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
+ final Filter filter) throws IllegalArgumentException, IOException {
+ HTablePool pool = RESTServlet.getInstance().getTablePool();
+ HTableInterface table = pool.getTable(tableName);
+ try {
+ Scan scan;
+ if (rowspec.hasEndRow()) {
+ scan = new Scan(rowspec.getStartRow(), rowspec.getEndRow());
+ } else {
+ scan = new Scan(rowspec.getStartRow());
+ }
+ if (rowspec.hasColumns()) {
+ byte[][] columns = rowspec.getColumns();
+ for (byte[] column: columns) {
+ byte[][] split = KeyValue.parseColumn(column);
+ if (split.length > 1 && (split[1] != null && split[1].length != 0)) {
+ scan.addColumn(split[0], split[1]);
+ } else {
+ scan.addFamily(split[0]);
+ }
+ }
+ } else {
+ for (HColumnDescriptor family:
+ table.getTableDescriptor().getFamilies()) {
+ scan.addFamily(family.getName());
+ }
+ }
+ scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
+ scan.setMaxVersions(rowspec.getMaxVersions());
+ if (filter != null) {
+ scan.setFilter(filter);
+ }
+ // always disable block caching on the cluster when scanning
+ scan.setCacheBlocks(false);
+ scanner = table.getScanner(scan);
+ cached = null;
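+ // the scanner id handed back to clients is the creation time in
+ // milliseconds followed by the scanner's hash code in hex, for example
+ // (illustrative only) something like "126989914832115f3a9c"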
+ id = Long.toString(System.currentTimeMillis()) +
+ Integer.toHexString(scanner.hashCode());
+ } finally {
+ pool.putTable(table);
+ }
+ }
+
+ public String getID() {
+ return id;
+ }
+
+ public void close() {
+ }
+
+ public boolean hasNext() {
+ if (cache != null) {
+ return true;
+ }
+ if (rowI != null && rowI.hasNext()) {
+ return true;
+ }
+ if (cached != null) {
+ return true;
+ }
+ try {
+ Result result = scanner.next();
+ if (result != null && !result.isEmpty()) {
+ cached = result;
+ }
+ } catch (UnknownScannerException e) {
+ throw new IllegalArgumentException(e);
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ }
+ return cached != null;
+ }
+
+ public KeyValue next() {
+ if (cache != null) {
+ KeyValue kv = cache;
+ cache = null;
+ return kv;
+ }
+ boolean loop;
+ do {
+ loop = false;
+ if (rowI != null) {
+ if (rowI.hasNext()) {
+ return rowI.next();
+ } else {
+ rowI = null;
+ }
+ }
+ if (cached != null) {
+ rowI = cached.list().iterator();
+ loop = true;
+ cached = null;
+ } else {
+ Result result = null;
+ try {
+ result = scanner.next();
+ } catch (UnknownScannerException e) {
+ throw new IllegalArgumentException(e);
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ }
+ if (result != null && !result.isEmpty()) {
+ rowI = result.list().iterator();
+ loop = true;
+ }
+ }
+ } while (loop);
+ return null;
+ }
+
+ public void putBack(KeyValue kv) {
+ this.cache = kv;
+ }
+
+ public void remove() {
+ throw new UnsupportedOperationException("remove not supported");
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/SchemaResource.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/SchemaResource.java
new file mode 100644
index 0000000..f84f609
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/SchemaResource.java
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+import java.util.Map;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.xml.namespace.QName;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.HTablePool;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.stargate.User;
+import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
+import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class SchemaResource implements Constants {
+ private static final Log LOG = LogFactory.getLog(SchemaResource.class);
+
+ User user;
+ String tableName;
+ String actualTableName;
+ CacheControl cacheControl;
+ RESTServlet servlet;
+
+ public SchemaResource(User user, String table) throws IOException {
+ if (user != null) {
+ this.user = user;
+ this.actualTableName =
+ !user.isAdmin() ? (user.getName() + "." + table) : table;
+ } else {
+ this.actualTableName = table;
+ }
+ this.tableName = table;
+ servlet = RESTServlet.getInstance();
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ private HTableDescriptor getTableSchema() throws IOException,
+ TableNotFoundException {
+ HTablePool pool = servlet.getTablePool();
+ HTableInterface table = pool.getTable(actualTableName);
+ try {
+ return table.getTableDescriptor();
+ } finally {
+ pool.putTable(table);
+ }
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ HTableDescriptor htd = getTableSchema();
+ TableSchemaModel model = new TableSchemaModel();
+ model.setName(tableName);
+ for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+ htd.getValues().entrySet()) {
+ model.addAttribute(Bytes.toString(e.getKey().get()),
+ Bytes.toString(e.getValue().get()));
+ }
+ for (HColumnDescriptor hcd: htd.getFamilies()) {
+ ColumnSchemaModel columnModel = new ColumnSchemaModel();
+ columnModel.setName(hcd.getNameAsString());
+ for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
+ hcd.getValues().entrySet()) {
+ columnModel.addAttribute(Bytes.toString(e.getKey().get()),
+ Bytes.toString(e.getValue().get()));
+ }
+ model.addColumnFamily(columnModel);
+ }
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ return response.build();
+ } catch (TableNotFoundException e) {
+ throw new WebApplicationException(Response.Status.NOT_FOUND);
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+
+ private Response replace(final byte[] tableName,
+ final TableSchemaModel model, final UriInfo uriInfo,
+ final HBaseAdmin admin) {
+ try {
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ for (Map.Entry<QName,Object> e: model.getAny().entrySet()) {
+ htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+ }
+ for (ColumnSchemaModel family: model.getColumns()) {
+ HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
+ for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
+ hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+ }
+ htd.addFamily(hcd);
+ }
+ if (admin.tableExists(tableName)) {
+ admin.disableTable(tableName);
+ admin.modifyTable(tableName, htd);
+ admin.enableTable(tableName);
+ } else try {
+ admin.createTable(htd);
+ } catch (TableExistsException e) {
+ // race, someone else created a table with the same name
+ throw new WebApplicationException(e, Response.Status.NOT_MODIFIED);
+ }
+ return Response.created(uriInfo.getAbsolutePath()).build();
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+
+ private Response update(final byte[] tableName,final TableSchemaModel model,
+ final UriInfo uriInfo, final HBaseAdmin admin) {
+ try {
+ HTableDescriptor htd = admin.getTableDescriptor(tableName);
+ admin.disableTable(tableName);
+ try {
+ for (ColumnSchemaModel family: model.getColumns()) {
+ HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
+ for (Map.Entry<QName,Object> e: family.getAny().entrySet()) {
+ hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
+ }
+ if (htd.hasFamily(hcd.getName())) {
+ admin.modifyColumn(tableName, hcd.getName(), hcd);
+ } else {
+ admin.addColumn(model.getName(), hcd);
+ }
+ }
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.INTERNAL_SERVER_ERROR);
+ } finally {
+ admin.enableTable(tableName);
+ }
+ return Response.ok().build();
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+
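+ /**
+ * Dispatches a schema change: when replace is true (the PUT path) or the
+ * table does not yet exist, the table descriptor is rebuilt wholesale via
+ * replace(); otherwise (the POST path) the submitted column families are
+ * merged into the existing schema via the column-level update().
+ */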
+ private Response update(final TableSchemaModel model, final boolean replace,
+ final UriInfo uriInfo) {
+ try {
+ servlet.invalidateMaxAge(tableName);
+ byte[] tableName = Bytes.toBytes(actualTableName);
+ HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
+ if (replace || !admin.tableExists(tableName)) {
+ return replace(tableName, model, uriInfo, admin);
+ } else {
+ return update(tableName, model, uriInfo, admin);
+ }
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+
+ @PUT
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
+ public Response put(final TableSchemaModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ // use the name given in the path, but warn if the name on the path and
+ // the name in the schema are different
+ if (!tableName.equals(model.getName())) {
+ LOG.warn("table name mismatch: path='" + tableName + "', schema='" +
+ model.getName() + "'");
+ }
+ return update(model, true, uriInfo);
+ }
+
+ @POST
+ @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
+ public Response post(final TableSchemaModel model,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("PUT " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ // use the name given in the path, but warn if the name on the path and
+ // the name in the schema are different
+ if (!tableName.equals(model.getName())) {
+ LOG.warn("table name mismatch: path='" + tableName + "', schema='" +
+ model.getName() + "'");
+ }
+ return update(model, false, uriInfo);
+ }
+
+ @DELETE
+ public Response delete(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
+ admin.disableTable(actualTableName);
+ admin.deleteTable(actualTableName);
+ return Response.ok().build();
+ } catch (TableNotFoundException e) {
+ throw new WebApplicationException(Response.Status.NOT_FOUND);
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/StorageClusterStatusResource.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/StorageClusterStatusResource.java
new file mode 100644
index 0000000..494b44c
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/StorageClusterStatusResource.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.UriInfo;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.HServerLoad;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.stargate.model.StorageClusterStatusModel;
+
+public class StorageClusterStatusResource implements Constants {
+ private static final Log LOG =
+ LogFactory.getLog(StorageClusterStatusResource.class);
+
+ private CacheControl cacheControl;
+ private RESTServlet servlet;
+
+ public StorageClusterStatusResource() throws IOException {
+ servlet = RESTServlet.getInstance();
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ try {
+ HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
+ ClusterStatus status = admin.getClusterStatus();
+ StorageClusterStatusModel model = new StorageClusterStatusModel();
+ model.setRegions(status.getRegionsCount());
+ model.setRequests(status.getRequestsCount());
+ model.setAverageLoad(status.getAverageLoad());
+ for (HServerInfo info: status.getServerInfo()) {
+ HServerLoad load = info.getLoad();
+ StorageClusterStatusModel.Node node =
+ model.addLiveNode(
+ info.getServerAddress().getHostname() + ":" +
+ Integer.toString(info.getServerAddress().getPort()),
+ info.getStartCode(), load.getUsedHeapMB(),
+ load.getMaxHeapMB());
+ node.setRequests(load.getNumberOfRequests());
+ for (HServerLoad.RegionLoad region: load.getRegionsLoad()) {
+ node.addRegion(region.getName(), region.getStores(),
+ region.getStorefiles(), region.getStorefileSizeMB(),
+ region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB());
+ }
+ }
+ for (String name: status.getDeadServerNames()) {
+ model.addDeadNode(name);
+ }
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ return response.build();
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/StorageClusterVersionResource.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/StorageClusterVersionResource.java
new file mode 100644
index 0000000..51c84f0
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/StorageClusterVersionResource.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+
+import javax.ws.rs.GET;
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.stargate.model.StorageClusterVersionModel;
+
+public class StorageClusterVersionResource implements Constants {
+ private static final Log LOG =
+ LogFactory.getLog(StorageClusterVersionResource.class);
+
+ private CacheControl cacheControl;
+ private RESTServlet servlet;
+
+ public StorageClusterVersionResource() throws IOException {
+ servlet = RESTServlet.getInstance();
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON})
+ public Response get(final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ Configuration conf = servlet.getConfiguration();
+ try {
+ HBaseAdmin admin = new HBaseAdmin(conf);
+ StorageClusterVersionModel model = new StorageClusterVersionModel();
+ model.setVersion(admin.getClusterStatus().getHBaseVersion());
+ ResponseBuilder response = Response.ok(model);
+ response.cacheControl(cacheControl);
+ return response.build();
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.SERVICE_UNAVAILABLE);
+ }
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/TableResource.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/TableResource.java
new file mode 100644
index 0000000..eebaf45
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/TableResource.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.Response;
+
+import org.apache.hadoop.hbase.stargate.User;
+
+public class TableResource implements Constants {
+
+ User user;
+ String table;
+
+ public TableResource(User user, String table) {
+ this.user = user;
+ this.table = table;
+ }
+
+ @Path("regions")
+ public RegionsResource getRegionsResource() throws IOException {
+ return new RegionsResource(user, table);
+ }
+
+ @Path("scanner")
+ public ScannerResource getScannerResource() throws IOException {
+ return new ScannerResource(user, table);
+ }
+
+ @Path("schema")
+ public SchemaResource getSchemaResource() throws IOException {
+ return new SchemaResource(user, table);
+ }
+
+ @Path("{rowspec: .+}")
+ public RowResource getRowResource(
+ final @PathParam("rowspec") String rowspec,
+ final @QueryParam("v") String versions) {
+ try {
+ return new RowResource(user, table, rowspec, versions);
+ } catch (IOException e) {
+ throw new WebApplicationException(e,
+ Response.Status.INTERNAL_SERVER_ERROR);
+ }
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/User.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/User.java
new file mode 100644
index 0000000..b317c33
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/User.java
@@ -0,0 +1,155 @@
+package org.apache.hadoop.hbase.stargate;
+
+import java.security.MessageDigest;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/** Representation of an authorized user */
+public class User implements Constants {
+
+ public static final User DEFAULT_USER = new User("default",
+ "00000000000000000000000000000000", false, true);
+
+ private String name;
+ private String token;
+ private boolean admin;
+ private boolean disabled = false;
+
+ /**
+ * Constructor
+ *
+ * Creates an access token. (Normally, you don't want this.)
+ * @param name user name
+ * @param admin true if user has administrator privilege
+ * @throws Exception
+ */
+ public User(String name, boolean admin) throws Exception {
+ this.name = name;
+ this.admin = admin;
+ byte[] digest = MessageDigest.getInstance("MD5")
+ .digest(Bytes.toBytes(name));
+ StringBuffer sb = new StringBuffer();
+ for (int i = 0; i < digest.length; i++) {
+ sb.append(Integer.toHexString(0xff & digest[i]));
+ }
+ this.token = sb.toString();
+ }
+
+ /**
+ * Constructor
+ * @param name user name
+ * @param token access token, a hex string
+ * @param admin true if user has administrator privilege
+ */
+ public User(String name, String token, boolean admin) {
+ this(name, token, admin, false);
+ }
+
+ /**
+ * Constructor
+ * @param name user name
+ * @param token access token, a hex string
+ * @param admin true if user has administrator privilege
+ * @param disabled true if user is disabled
+ */
+ public User(String name, String token, boolean admin, boolean disabled) {
+ this.name = name;
+ this.token = token;
+ this.admin = admin;
+ this.disabled = disabled;
+ }
+
+ /**
+ * @return user name
+ */
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @param name user name
+ */
+ public void setName(final String name) {
+ this.name = name;
+ }
+
+ /**
+ * @return access token, a hex string
+ */
+ public String getToken() {
+ return token;
+ }
+
+ /**
+ * @param token access token, a hex string
+ */
+ public void setToken(final String token) {
+ this.token = token;
+ }
+
+ /**
+ * @return true if user has administrator privilege
+ */
+ public boolean isAdmin() {
+ return admin;
+ }
+
+ /**
+ * @param admin true if user has administrator privilege
+ */
+ public void setAdmin(final boolean admin) {
+ this.admin = admin;
+ }
+
+ /**
+ * @return true if user is disabled
+ */
+ public boolean isDisabled() {
+ return disabled;
+ }
+
+ /**
+ * @param disabled true if user is disabled
+ */
+ public void setDisabled(boolean disabled) {
+ this.disabled = disabled;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + (admin ? 1231 : 1237);
+ result = prime * result + (disabled ? 1231 : 1237);
+ result = prime * result + ((name == null) ? 0 : name.hashCode());
+ result = prime * result + ((token == null) ? 0 : token.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ User other = (User) obj;
+ if (admin != other.admin)
+ return false;
+ if (disabled != other.disabled)
+ return false;
+ if (name == null) {
+ if (other.name != null)
+ return false;
+ } else if (!name.equals(other.name))
+ return false;
+ if (token == null) {
+ if (other.token != null)
+ return false;
+ } else if (!token.equals(other.token))
+ return false;
+ return true;
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/VersionResource.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/VersionResource.java
new file mode 100644
index 0000000..a778278
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/VersionResource.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+
+import javax.servlet.ServletContext;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.core.CacheControl;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import javax.ws.rs.core.Response.ResponseBuilder;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.stargate.model.VersionModel;
+
+/**
+ * Implements Stargate software version reporting via
+ *
+ * /version/stargate
+ *
+ * /version (alias for /version/stargate )
+ */
+public class VersionResource implements Constants {
+ private static final Log LOG = LogFactory.getLog(VersionResource.class);
+
+ private CacheControl cacheControl;
+ private RESTServlet servlet;
+
+ public VersionResource() throws IOException {
+ servlet = RESTServlet.getInstance();
+ cacheControl = new CacheControl();
+ cacheControl.setNoCache(true);
+ cacheControl.setNoTransform(false);
+ }
+
+ /**
+ * Build a response for a version request.
+ * @param context servlet context
+ * @param uriInfo (JAX-RS context variable) request URL
+ * @return a response for a version request
+ */
+ @GET
+ @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
+ public Response get(final @Context ServletContext context,
+ final @Context UriInfo uriInfo) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("GET " + uriInfo.getAbsolutePath());
+ }
+ servlet.getMetrics().incrementRequests(1);
+ ResponseBuilder response = Response.ok(new VersionModel(context));
+ response.cacheControl(cacheControl);
+ return response.build();
+ }
+
+ /**
+ * Dispatch to StorageClusterVersionResource
+ */
+ @Path("cluster")
+ public StorageClusterVersionResource getClusterVersionResource()
+ throws IOException {
+ return new StorageClusterVersionResource();
+ }
+
+ /**
+ * Dispatch /version/stargate to self.
+ */
+ @Path("stargate")
+ public VersionResource getVersionResource() {
+ return this;
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/Authenticator.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/Authenticator.java
new file mode 100644
index 0000000..dcba2c8
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/Authenticator.java
@@ -0,0 +1,11 @@
+package org.apache.hadoop.hbase.stargate.auth;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.stargate.User;
+
+public abstract class Authenticator {
+
+ public abstract User getUserForToken(String token) throws IOException;
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/HBCAuthenticator.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/HBCAuthenticator.java
new file mode 100644
index 0000000..8248bd3
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/HBCAuthenticator.java
@@ -0,0 +1,39 @@
+package org.apache.hadoop.hbase.stargate.auth;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.stargate.User;
+
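+/**
+ * An authenticator that resolves access tokens from the HBase
+ * configuration: "stargate.auth.token.<token>" maps a token to a user name,
+ * and "stargate.auth.user.<name>.admin" and
+ * "stargate.auth.user.<name>.disabled" (both defaulting to false) supply
+ * the user flags, as read in getUserForToken below.
+ */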
+public class HBCAuthenticator extends Authenticator {
+
+ Configuration conf;
+
+ /**
+ * Default constructor
+ */
+ public HBCAuthenticator() {
+ this(HBaseConfiguration.create());
+ }
+
+ /**
+ * Constructor
+ * @param conf
+ */
+ public HBCAuthenticator(Configuration conf) {
+ this.conf = conf;
+ }
+
+ @Override
+ public User getUserForToken(String token) {
+ String name = conf.get("stargate.auth.token." + token);
+ if (name == null) {
+ return null;
+ }
+ boolean admin = conf.getBoolean("stargate.auth.user." + name + ".admin",
+ false);
+ boolean disabled = conf.getBoolean("stargate.auth.user." + name + ".disabled",
+ false);
+ return new User(name, token, admin, disabled);
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/HTableAuthenticator.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/HTableAuthenticator.java
new file mode 100644
index 0000000..e1cfeb6
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/HTableAuthenticator.java
@@ -0,0 +1,90 @@
+package org.apache.hadoop.hbase.stargate.auth;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.stargate.Constants;
+import org.apache.hadoop.hbase.stargate.User;
+import org.apache.hadoop.hbase.util.Bytes;
+
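+/**
+ * An authenticator that looks access tokens up in an HBase table: each row
+ * is keyed by the token and carries a "user" family with "name", "admin",
+ * and "disabled" qualifiers, as read by getUserForToken below.
+ */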
+public class HTableAuthenticator extends Authenticator implements Constants {
+
+ static final byte[] USER = Bytes.toBytes("user");
+ static final byte[] NAME = Bytes.toBytes("name");
+ static final byte[] ADMIN = Bytes.toBytes("admin");
+ static final byte[] DISABLED = Bytes.toBytes("disabled");
+
+ Configuration conf;
+ String tableName;
+ HTable table;
+
+ /**
+ * Default constructor
+ */
+ public HTableAuthenticator() {
+ this(HBaseConfiguration.create());
+ }
+
+ /**
+ * Constructor
+ * @param conf
+ */
+ public HTableAuthenticator(Configuration conf) {
+ this(conf, conf.get("stargate.auth.htable.name", USERS_TABLE));
+ }
+
+ /**
+ * Constructor
+ * @param conf
+ * @param tableName
+ */
+ public HTableAuthenticator(Configuration conf, String tableName) {
+ this.conf = conf;
+ this.tableName = tableName;
+ }
+
+ /**
+ * Constructor
+ * @param conf
+ * @param table
+ */
+ public HTableAuthenticator(Configuration conf, HTable table) {
+ this.conf = conf;
+ this.table = table;
+ this.tableName = Bytes.toString(table.getTableName());
+ }
+
+ @Override
+ public User getUserForToken(String token) throws IOException {
+ if (table == null) {
+ this.table = new HTable(conf, tableName);
+ }
+ Get get = new Get(Bytes.toBytes(token));
+ get.addColumn(USER, NAME);
+ get.addColumn(USER, ADMIN);
+ get.addColumn(USER, DISABLED);
+ Result result = table.get(get);
+ byte[] value = result.getValue(USER, NAME);
+ if (value == null) {
+ return null;
+ }
+ String name = Bytes.toString(value);
+ boolean admin = false;
+ value = result.getValue(USER, ADMIN);
+ if (value != null) {
+ admin = Bytes.toBoolean(value);
+ }
+ boolean disabled = false;
+ value = result.getValue(USER, DISABLED);
+ if (value != null) {
+ disabled = Bytes.toBoolean(value);
+ }
+ return new User(name, token, admin, disabled);
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/JDBCAuthenticator.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/JDBCAuthenticator.java
new file mode 100644
index 0000000..7ccc464
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/JDBCAuthenticator.java
@@ -0,0 +1,88 @@
+package org.apache.hadoop.hbase.stargate.auth;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.stargate.User;
+import org.apache.hadoop.util.StringUtils;
+
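+/**
+ * An authenticator backed by a JDBC data source. The configured table is
+ * expected to expose "token", "name", "admin", and "disabled" columns (see
+ * the SELECT in getUserForToken). An illustrative schema, not mandated by
+ * this code, would be:
+ *
+ * CREATE TABLE users (token VARCHAR(32) PRIMARY KEY, name VARCHAR(64),
+ * admin BOOLEAN, disabled BOOLEAN);
+ */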
+public class JDBCAuthenticator extends Authenticator {
+
+ static final Log LOG = LogFactory.getLog(JDBCAuthenticator.class);
+ static final int MAX_RETRIES = 5;
+ static final long RETRY_SLEEP_TIME = 1000 * 2;
+
+ String url;
+ String table;
+ String user;
+ String password;
+ Connection connection;
+ PreparedStatement userFetchStmt;
+
+ /**
+ * Constructor
+ * @param conf
+ */
+ public JDBCAuthenticator(HBaseConfiguration conf) {
+ this(conf.get("stargate.auth.jdbc.url"),
+ conf.get("stargate.auth.jdbc.table"),
+ conf.get("stargate.auth.jdbc.user"),
+ conf.get("stargate.auth.jdbc.password"));
+ }
+
+ /**
+ * Constructor
+ * @param url
+ * @param table
+ * @param user
+ * @param password
+ */
+ public JDBCAuthenticator(String url, String table, String user,
+ String password) {
+ this.url = url;
+ this.table = table;
+ this.user = user;
+ this.password = password;
+ }
+
+ @Override
+ public User getUserForToken(String token) throws IOException {
+ int retries = 0;
+ while (true) try {
+ if (connection == null) {
+ connection = DriverManager.getConnection(url, user, password);
+ userFetchStmt = connection.prepareStatement(
+ "SELECT name, admin, disabled FROM " + table + " WHERE token = ?");
+ }
+ ResultSet results;
+ synchronized (userFetchStmt) {
+ userFetchStmt.setString(1, token);
+ results = userFetchStmt.executeQuery();
+ }
+ if (!results.next()) {
+ return null;
+ }
+ return new User(results.getString(1), token, results.getBoolean(2),
+ results.getBoolean(3));
+ } catch (SQLException e) {
+ connection = null;
+ if (++retries > MAX_RETRIES) {
+ throw new IOException(e);
+ } else try {
+ LOG.warn(StringUtils.stringifyException(e));
+ Thread.sleep(RETRY_SLEEP_TIME);
+ } catch (InterruptedException ex) {
+ // ignore
+ }
+ }
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/ZooKeeperAuthenticator.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/ZooKeeperAuthenticator.java
new file mode 100644
index 0000000..77dc247
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/auth/ZooKeeperAuthenticator.java
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.auth;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.stargate.Constants;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.stargate.User;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
+
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.ZooDefs.Ids;
+import org.apache.zookeeper.data.Stat;
+
+import org.json.JSONObject;
+
+/**
+ * A simple authenticator module for ZooKeeper.
+ *
+ * /stargate/
+ * users/
+ * <token>
+ * Where <token> is a JSON formatted user record with the keys
+ * 'name' (String, required), 'token' (String, optional), 'admin' (boolean,
+ * optional), and 'disabled' (boolean, optional).
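+ * <p>
+ * For example, a record for a hypothetical administrative user might look
+ * like:
+ * <pre>
+ * {"name": "admin", "admin": true, "disabled": false}
+ * </pre>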
+ */
+public class ZooKeeperAuthenticator extends Authenticator
+ implements Constants {
+
+ final String usersZNode;
+ ZooKeeperWrapper wrapper;
+
+ private boolean ensureParentExists(final String znode) {
+ int index = znode.lastIndexOf("/");
+ if (index <= 0) { // Parent is root, which always exists.
+ return true;
+ }
+ return ensureExists(znode.substring(0, index));
+ }
+
+ private boolean ensureExists(final String znode) {
+ ZooKeeper zk = wrapper.getZooKeeper();
+ try {
+ Stat stat = zk.exists(znode, false);
+ if (stat != null) {
+ return true;
+ }
+ zk.create(znode, new byte[0], Ids.OPEN_ACL_UNSAFE,
+ CreateMode.PERSISTENT);
+ return true;
+ } catch (KeeperException.NodeExistsException e) {
+ return true; // ok, move on.
+ } catch (KeeperException.NoNodeException e) {
+ return ensureParentExists(znode) && ensureExists(znode);
+ } catch (KeeperException e) {
+ } catch (InterruptedException e) {
+ }
+ return false;
+ }
+
+ /**
+ * Constructor
+ * @param conf
+ * @throws IOException
+ */
+ public ZooKeeperAuthenticator(Configuration conf) throws IOException {
+ this(conf, new ZooKeeperWrapper(conf, new Watcher() {
+ public void process(WatchedEvent event) { }
+ }));
+ ensureExists(usersZNode);
+ }
+
+ /**
+ * Constructor
+ * @param conf
+ * @param wrapper
+ */
+ public ZooKeeperAuthenticator(Configuration conf,
+ ZooKeeperWrapper wrapper) {
+ this.usersZNode = conf.get("stargate.auth.zk.users", USERS_ZNODE_ROOT);
+ this.wrapper = wrapper;
+ }
+
+ @Override
+ public User getUserForToken(String token) throws IOException {
+ ZooKeeper zk = wrapper.getZooKeeper();
+ try {
+ byte[] data = zk.getData(usersZNode + "/" + token, null, null);
+ if (data == null) {
+ return null;
+ }
+ JSONObject o = new JSONObject(Bytes.toString(data));
+ if (!o.has("name")) {
+ throw new IOException("invalid record, missing 'name'");
+ }
+ String name = o.getString("name");
+ boolean admin = false;
+ if (o.has("admin")) { admin = o.getBoolean("admin"); }
+ boolean disabled = false;
+ if (o.has("disabled")) { disabled = o.getBoolean("disabled"); }
+ return new User(name, token, admin, disabled);
+ } catch (KeeperException.NoNodeException e) {
+ return null;
+ } catch (Exception e) {
+ throw new IOException(e);
+ }
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/client/Client.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/client/Client.java
new file mode 100644
index 0000000..4466876
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/client/Client.java
@@ -0,0 +1,438 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.client;
+
+import java.io.IOException;
+
+import org.apache.commons.httpclient.Header;
+import org.apache.commons.httpclient.HttpClient;
+import org.apache.commons.httpclient.HttpMethod;
+import org.apache.commons.httpclient.HttpVersion;
+import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
+import org.apache.commons.httpclient.URI;
+import org.apache.commons.httpclient.methods.ByteArrayRequestEntity;
+import org.apache.commons.httpclient.methods.DeleteMethod;
+import org.apache.commons.httpclient.methods.GetMethod;
+import org.apache.commons.httpclient.methods.HeadMethod;
+import org.apache.commons.httpclient.methods.PostMethod;
+import org.apache.commons.httpclient.methods.PutMethod;
+import org.apache.commons.httpclient.params.HttpClientParams;
+import org.apache.commons.httpclient.params.HttpConnectionManagerParams;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * A wrapper around HttpClient which provides useful functionality and
+ * semantics for interacting with the Stargate REST gateway.
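+ * <p>
+ * A minimal usage sketch (host, port, and path are placeholders):
+ * <pre>
+ * Client client = new Client(new Cluster().add("localhost", 8080));
+ * Response response = client.get("/version", "text/plain");
+ * client.shutdown();
+ * </pre>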
+ */
+public class Client {
+ public static final Header[] EMPTY_HEADER_ARRAY = new Header[0];
+
+ private static final Log LOG = LogFactory.getLog(Client.class);
+
+ private HttpClient httpClient;
+ private Cluster cluster;
+
+ /**
+ * Default Constructor
+ */
+ public Client() {
+ this(null);
+ }
+
+ /**
+ * Constructor
+ * @param cluster the cluster definition
+ */
+ public Client(Cluster cluster) {
+ this.cluster = cluster;
+ httpClient = new HttpClient(new MultiThreadedHttpConnectionManager());
+ HttpConnectionManagerParams managerParams =
+ httpClient.getHttpConnectionManager().getParams();
+ managerParams.setConnectionTimeout(2000); // 2 s
+ HttpClientParams clientParams = httpClient.getParams();
+ clientParams.setVersion(HttpVersion.HTTP_1_1);
+ }
+
+ /**
+ * Shut down the client. Close any open persistent connections.
+ */
+ public void shutdown() {
+ MultiThreadedHttpConnectionManager manager =
+ (MultiThreadedHttpConnectionManager) httpClient.getHttpConnectionManager();
+ manager.shutdown();
+ }
+
+ /**
+ * Execute a transaction method given only the path. Will select at random
+ * one of the members of the supplied cluster definition and iterate through
+ * the list until a transaction can be successfully completed. The
+ * definition of success here is a complete HTTP transaction, irrespective
+ * of result code.
+ * @param cluster the cluster definition
+ * @param method the transaction method
+ * @param headers HTTP header values to send
+ * @param path the path
+ * @return the HTTP response code
+ * @throws IOException
+ */
+ @SuppressWarnings("deprecation")
+ public int executePathOnly(Cluster cluster, HttpMethod method,
+ Header[] headers, String path) throws IOException {
+ IOException lastException;
+ if (cluster.nodes.size() < 1) {
+ throw new IOException("Cluster is empty");
+ }
+ int start = (int)Math.round((cluster.nodes.size() - 1) * Math.random());
+ int i = start;
+ do {
+ cluster.lastHost = cluster.nodes.get(i);
+ try {
+ StringBuilder sb = new StringBuilder();
+ sb.append("http://");
+ sb.append(cluster.lastHost);
+ sb.append(path);
+ URI uri = new URI(sb.toString());
+ return executeURI(method, headers, uri.toString());
+ } catch (IOException e) {
+ lastException = e;
+ }
+ if (++i == cluster.nodes.size()) {
+ i = 0;
+ }
+ } while (i != start);
+ throw lastException;
+ }
+
+ /**
+ * Execute a transaction method given a complete URI.
+ * @param method the transaction method
+ * @param headers HTTP header values to send
+ * @param uri the URI
+ * @return the HTTP response code
+ * @throws IOException
+ */
+ @SuppressWarnings("deprecation")
+ public int executeURI(HttpMethod method, Header[] headers, String uri)
+ throws IOException {
+ method.setURI(new URI(uri));
+ if (headers != null) {
+ for (Header header: headers) {
+ method.addRequestHeader(header);
+ }
+ }
+ long startTime = System.currentTimeMillis();
+ int code = httpClient.executeMethod(method);
+ long endTime = System.currentTimeMillis();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(method.getName() + " " + uri + ": " + code + " " +
+ method.getStatusText() + " in " + (endTime - startTime) + " ms");
+ }
+ return code;
+ }
+
+ /**
+ * Execute a transaction method. Calls executePathOnly if 'path' is only
+ * a path, or executeURI if it is a complete URI.
+ * @param cluster the cluster definition
+ * @param method the HTTP method
+ * @param headers HTTP header values to send
+ * @param path the path or URI
+ * @return the HTTP response code
+ * @throws IOException
+ */
+ public int execute(Cluster cluster, HttpMethod method, Header[] headers,
+ String path) throws IOException {
+ if (path.startsWith("/")) {
+ return executePathOnly(cluster, method, headers, path);
+ }
+ return executeURI(method, headers, path);
+ }
+
+ /**
+ * @return the cluster definition
+ */
+ public Cluster getCluster() {
+ return cluster;
+ }
+
+ /**
+ * @param cluster the cluster definition
+ */
+ public void setCluster(Cluster cluster) {
+ this.cluster = cluster;
+ }
+
+ /**
+ * Send a HEAD request
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response head(String path) throws IOException {
+ return head(cluster, path, null);
+ }
+
+ /**
+ * Send a HEAD request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param headers the HTTP headers to include in the request
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response head(Cluster cluster, String path, Header[] headers)
+ throws IOException {
+ HeadMethod method = new HeadMethod();
+ int code = execute(cluster, method, headers, path);
+ headers = method.getResponseHeaders();
+ method.releaseConnection();
+ return new Response(code, headers, null);
+ }
+
+ /**
+ * Send a GET request
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(String path) throws IOException {
+ return get(cluster, path);
+ }
+
+ /**
+ * Send a GET request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(Cluster cluster, String path) throws IOException {
+ return get(cluster, path, EMPTY_HEADER_ARRAY);
+ }
+
+ /**
+ * Send a GET request
+ * @param path the path or URI
+ * @param accept Accept header value
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(String path, String accept) throws IOException {
+ return get(cluster, path, accept);
+ }
+
+ /**
+ * Send a GET request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param accept Accept header value
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(Cluster cluster, String path, String accept)
+ throws IOException {
+ Header[] headers = new Header[1];
+ headers[0] = new Header("Accept", accept);
+ return get(cluster, path, headers);
+ }
+
+ /**
+ * Send a GET request
+ * @param path the path or URI
+ * @param headers the HTTP headers to include in the request,
+ * Accept must be supplied
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(String path, Header[] headers) throws IOException {
+ return get(cluster, path, headers);
+ }
+
+ /**
+ * Send a GET request
+ * @param c the cluster definition
+ * @param path the path or URI
+ * @param headers the HTTP headers to include in the request
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response get(Cluster c, String path, Header[] headers)
+ throws IOException {
+ GetMethod method = new GetMethod();
+ int code = execute(c, method, headers, path);
+ headers = method.getResponseHeaders();
+ byte[] body = method.getResponseBody();
+ method.releaseConnection();
+ return new Response(code, headers, body);
+ }
+
+ /**
+ * Send a PUT request
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response put(String path, String contentType, byte[] content)
+ throws IOException {
+ return put(cluster, path, contentType, content);
+ }
+
+ /**
+ * Send a PUT request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response put(Cluster cluster, String path, String contentType,
+ byte[] content) throws IOException {
+ Header[] headers = new Header[1];
+ headers[0] = new Header("Content-Type", contentType);
+ return put(cluster, path, headers, content);
+ }
+
+ /**
+ * Send a PUT request
+ * @param path the path or URI
+ * @param headers the HTTP headers to include, Content-Type must be
+ * supplied
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response put(String path, Header[] headers, byte[] content)
+ throws IOException {
+ return put(cluster, path, headers, content);
+ }
+
+ /**
+ * Send a PUT request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param headers the HTTP headers to include, Content-Type must be
+ * supplied
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response put(Cluster cluster, String path, Header[] headers,
+ byte[] content) throws IOException {
+ PutMethod method = new PutMethod();
+ method.setRequestEntity(new ByteArrayRequestEntity(content));
+ int code = execute(cluster, method, headers, path);
+ headers = method.getResponseHeaders();
+ content = method.getResponseBody();
+ method.releaseConnection();
+ return new Response(code, headers, content);
+ }
+
+ /**
+ * Send a POST request
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response post(String path, String contentType, byte[] content)
+ throws IOException {
+ return post(cluster, path, contentType, content);
+ }
+
+ /**
+ * Send a POST request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param contentType the content MIME type
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response post(Cluster cluster, String path, String contentType,
+ byte[] content) throws IOException {
+ Header[] headers = new Header[1];
+ headers[0] = new Header("Content-Type", contentType);
+ return post(cluster, path, headers, content);
+ }
+
+ /**
+ * Send a POST request
+ * @param path the path or URI
+ * @param headers the HTTP headers to include, Content-Type must be
+ * supplied
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response post(String path, Header[] headers, byte[] content)
+ throws IOException {
+ return post(cluster, path, headers, content);
+ }
+
+ /**
+ * Send a POST request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @param headers the HTTP headers to include, Content-Type must be
+ * supplied
+ * @param content the content bytes
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response post(Cluster cluster, String path, Header[] headers,
+ byte[] content) throws IOException {
+ PostMethod method = new PostMethod();
+ method.setRequestEntity(new ByteArrayRequestEntity(content));
+ int code = execute(cluster, method, headers, path);
+ headers = method.getResponseHeaders();
+ content = method.getResponseBody();
+ method.releaseConnection();
+ return new Response(code, headers, content);
+ }
+
+ /**
+ * Send a DELETE request
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response delete(String path) throws IOException {
+ return delete(cluster, path);
+ }
+
+ /**
+ * Send a DELETE request
+ * @param cluster the cluster definition
+ * @param path the path or URI
+ * @return a Response object with response detail
+ * @throws IOException
+ */
+ public Response delete(Cluster cluster, String path) throws IOException {
+ DeleteMethod method = new DeleteMethod();
+ int code = execute(cluster, method, null, path);
+ Header[] headers = method.getResponseHeaders();
+ method.releaseConnection();
+ return new Response(code, headers);
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/client/Cluster.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/client/Cluster.java
new file mode 100644
index 0000000..2264256
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/client/Cluster.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.client;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * A list of 'host:port' addresses of HTTP servers operating as a single
+ * entity, for example multiple redundant web service gateways.
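+ * <p>
+ * For example (addresses are placeholders):
+ * <pre>
+ * Cluster cluster = new Cluster()
+ *   .add("gateway1.example.com", 8080)
+ *   .add("gateway2.example.com", 8080);
+ * </pre>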
+ */
+public class Cluster {
+ protected List<String> nodes =
+ Collections.synchronizedList(new ArrayList<String>());
+ protected String lastHost;
+
+ /**
+ * Constructor
+ */
+ public Cluster() {}
+
+ /**
+ * Constructor
+ * @param nodes a list of service locations, in 'host:port' format
+ */
+ public Cluster(List<String> nodes) {
+ this.nodes.addAll(nodes);
+ }
+
+ /**
+ * Add a node to the cluster
+ * @param node the service location in 'host:port' format
+ */
+ public Cluster add(String node) {
+ nodes.add(node);
+ return this;
+ }
+
+ /**
+ * Add a node to the cluster
+ * @param name host name
+ * @param port service port
+ */
+ public Cluster add(String name, int port) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(name);
+ sb.append(':');
+ sb.append(port);
+ return add(sb.toString());
+ }
+
+ /**
+ * Remove a node from the cluster
+ * @param node the service location in 'host:port' format
+ */
+ public Cluster remove(String node) {
+ nodes.remove(node);
+ return this;
+ }
+
+ /**
+ * Remove a node from the cluster
+ * @param name host name
+ * @param port service port
+ */
+ public Cluster remove(String name, int port) {
+ StringBuilder sb = new StringBuilder();
+ sb.append(name);
+ sb.append(':');
+ sb.append(port);
+ return remove(sb.toString());
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/client/Response.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/client/Response.java
new file mode 100644
index 0000000..11637a4
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/client/Response.java
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.client;
+
+import org.apache.commons.httpclient.Header;
+
+/**
+ * The HTTP result code, response headers, and body of an HTTP response.
+ */
+public class Response {
+ private int code;
+ private Header[] headers;
+ private byte[] body;
+
+ /**
+ * Constructor
+ * @param code the HTTP response code
+ */
+ public Response(int code) {
+ this(code, null, null);
+ }
+
+ /**
+ * Constructor
+ * @param code the HTTP response code
+ * @param headers the HTTP response headers
+ */
+ public Response(int code, Header[] headers) {
+ this(code, headers, null);
+ }
+
+ /**
+ * Constructor
+ * @param code the HTTP response code
+ * @param headers the HTTP response headers
+ * @param body the response body, can be null
+ */
+ public Response(int code, Header[] headers, byte[] body) {
+ this.code = code;
+ this.headers = headers;
+ this.body = body;
+ }
+
+ /**
+ * @return the HTTP response code
+ */
+ public int getCode() {
+ return code;
+ }
+
+ /**
+ * @return the HTTP response headers
+ */
+ public Header[] getHeaders() {
+ return headers;
+ }
+
+ /**
+ * @return the value of the Location header
+ */
+ public String getLocation() {
+ for (Header header: headers) {
+ if (header.getName().equals("Location")) {
+ return header.getValue();
+ }
+ }
+ return null;
+ }
+
+ /**
+ * @return true if a response body was sent
+ */
+ public boolean hasBody() {
+ return body != null;
+ }
+
+ /**
+ * @return the HTTP response body
+ */
+ public byte[] getBody() {
+ return body;
+ }
+
+ /**
+ * @param code the HTTP response code
+ */
+ public void setCode(int code) {
+ this.code = code;
+ }
+
+ /**
+ * @param headers the HTTP response headers
+ */
+ public void setHeaders(Header[] headers) {
+ this.headers = headers;
+ }
+
+ /**
+ * @param body the response body
+ */
+ public void setBody(byte[] body) {
+ this.body = body;
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/metrics/StargateMetrics.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/metrics/StargateMetrics.java
new file mode 100644
index 0000000..a53988b
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/metrics/StargateMetrics.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.metrics;
+
+import org.apache.hadoop.hbase.metrics.MetricsRate;
+
+import org.apache.hadoop.metrics.MetricsContext;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.metrics.MetricsUtil;
+import org.apache.hadoop.metrics.Updater;
+import org.apache.hadoop.metrics.jvm.JvmMetrics;
+import org.apache.hadoop.metrics.util.MetricsRegistry;
+
+public class StargateMetrics implements Updater {
+ private final MetricsRecord metricsRecord;
+ private final MetricsRegistry registry = new MetricsRegistry();
+ private final StargateStatistics stargateStatistics;
+
+ private MetricsRate requests = new MetricsRate("requests", registry);
+
+ public StargateMetrics() {
+ MetricsContext context = MetricsUtil.getContext("stargate");
+ metricsRecord = MetricsUtil.createRecord(context, "stargate");
+ String name = Thread.currentThread().getName();
+ metricsRecord.setTag("Master", name);
+ context.registerUpdater(this);
+ JvmMetrics.init("Stargate", name);
+ // expose the MBean for metrics
+ stargateStatistics = new StargateStatistics(registry);
+
+ }
+
+ public void shutdown() {
+ if (stargateStatistics != null) {
+ stargateStatistics.shutdown();
+ }
+ }
+
+ /**
+ * Since this object is a registered updater, this method will be called
+ * periodically, e.g. every 5 seconds.
+ * @param unused
+ */
+ public void doUpdates(MetricsContext unused) {
+ synchronized (this) {
+ requests.pushMetric(metricsRecord);
+ }
+ this.metricsRecord.update();
+ }
+
+ public void resetAllMinMax() {
+ // Nothing to do
+ }
+
+ /**
+ * @return Count of requests.
+ */
+ public float getRequests() {
+ return requests.getPreviousIntervalValue();
+ }
+
+ /**
+ * @param inc How much to add to requests.
+ */
+ public void incrementRequests(final int inc) {
+ requests.inc(inc);
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/metrics/StargateStatistics.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/metrics/StargateStatistics.java
new file mode 100644
index 0000000..d3f874a
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/metrics/StargateStatistics.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.metrics;
+
+import javax.management.ObjectName;
+
+import org.apache.hadoop.hbase.metrics.MetricsMBeanBase;
+
+import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics.util.MetricsRegistry;
+
+public class StargateStatistics extends MetricsMBeanBase {
+ private final ObjectName mbeanName;
+
+ public StargateStatistics(MetricsRegistry registry) {
+ super(registry, "StargateStatistics");
+ mbeanName = MBeanUtil.registerMBean("Stargate",
+ "StargateStatistics", this);
+ }
+
+ public void shutdown() {
+ if (mbeanName != null) {
+ MBeanUtil.unregisterMBean(mbeanName);
+ }
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/CellModel.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/CellModel.java
new file mode 100644
index 0000000..284ec0f
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/CellModel.java
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlType;
+import javax.xml.bind.annotation.XmlValue;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;
+
+import com.google.protobuf.ByteString;
+
+/**
+ * Representation of a cell. A cell is a single value associated with a
+ * column and optional qualifier, and either the timestamp when it was stored
+ * or the user-provided timestamp if one was explicitly supplied.
+ *
+ *
+ * <complexType name="Cell">
+ * <sequence>
+ * <element name="value" maxOccurs="1" minOccurs="1">
+ * <simpleType>
+ * <restriction base="base64Binary"/>
+ * </simpleType>
+ * </element>
+ * </sequence>
+ * <attribute name="column" type="base64Binary" />
+ * <attribute name="timestamp" type="int" />
+ * </complexType>
+ *
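+ * For example, an illustrative cell with column "cf:qual" and value "data"
+ * (both base64 encoded) and a placeholder timestamp:
+ * <pre>
+ * <Cell column="Y2Y6cXVhbA==" timestamp="1234567890">ZGF0YQ==</Cell>
+ * </pre>
+ *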
+ */
+@XmlRootElement(name="Cell")
+@XmlType(propOrder={"column","timestamp"})
+public class CellModel implements ProtobufMessageHandler, Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private long timestamp = HConstants.LATEST_TIMESTAMP;
+ private byte[] column;
+ private byte[] value;
+
+ /**
+ * Default constructor
+ */
+ public CellModel() {}
+
+ /**
+ * Constructor
+ * @param column
+ * @param value
+ */
+ public CellModel(byte[] column, byte[] value) {
+ this(column, HConstants.LATEST_TIMESTAMP, value);
+ }
+
+ /**
+ * Constructor
+ * @param column
+ * @param qualifier
+ * @param value
+ */
+ public CellModel(byte[] column, byte[] qualifier, byte[] value) {
+ this(column, qualifier, HConstants.LATEST_TIMESTAMP, value);
+ }
+
+ /**
+ * Constructor from KeyValue
+ * @param kv
+ */
+ public CellModel(KeyValue kv) {
+ this(kv.getFamily(), kv.getQualifier(), kv.getTimestamp(), kv.getValue());
+ }
+
+ /**
+ * Constructor
+ * @param column
+ * @param timestamp
+ * @param value
+ */
+ public CellModel(byte[] column, long timestamp, byte[] value) {
+ this.column = column;
+ this.timestamp = timestamp;
+ this.value = value;
+ }
+
+ /**
+ * Constructor
+ * @param column
+ * @param qualifier
+ * @param timestamp
+ * @param value
+ */
+ public CellModel(byte[] column, byte[] qualifier, long timestamp,
+ byte[] value) {
+ this.column = KeyValue.makeColumn(column, qualifier);
+ this.timestamp = timestamp;
+ this.value = value;
+ }
+
+ /**
+ * @return the column
+ */
+ @XmlAttribute
+ public byte[] getColumn() {
+ return column;
+ }
+
+ /**
+ * @param column the column to set
+ */
+ public void setColumn(byte[] column) {
+ this.column = column;
+ }
+
+ /**
+ * @return true if the timestamp property has been specified by the
+ * user
+ */
+ public boolean hasUserTimestamp() {
+ return timestamp != HConstants.LATEST_TIMESTAMP;
+ }
+
+ /**
+ * @return the timestamp
+ */
+ @XmlAttribute
+ public long getTimestamp() {
+ return timestamp;
+ }
+
+ /**
+ * @param timestamp the timestamp to set
+ */
+ public void setTimestamp(long timestamp) {
+ this.timestamp = timestamp;
+ }
+
+ /**
+ * @return the value
+ */
+ @XmlValue
+ public byte[] getValue() {
+ return value;
+ }
+
+ /**
+ * @param value the value to set
+ */
+ public void setValue(byte[] value) {
+ this.value = value;
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ Cell.Builder builder = Cell.newBuilder();
+ builder.setColumn(ByteString.copyFrom(getColumn()));
+ builder.setData(ByteString.copyFrom(getValue()));
+ if (hasUserTimestamp()) {
+ builder.setTimestamp(getTimestamp());
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ Cell.Builder builder = Cell.newBuilder();
+ builder.mergeFrom(message);
+ setColumn(builder.getColumn().toByteArray());
+ setValue(builder.getData().toByteArray());
+ if (builder.hasTimestamp()) {
+ setTimestamp(builder.getTimestamp());
+ }
+ return this;
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/CellSetModel.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/CellSetModel.java
new file mode 100644
index 0000000..7b9613f
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/CellSetModel.java
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlElement;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet;
+
+import com.google.protobuf.ByteString;
+
+/**
+ * Representation of a grouping of cells. May contain cells from more than
+ * one row. Encapsulates RowModel and CellModel models.
+ *
+ *
+ * <complexType name="CellSet">
+ * <sequence>
+ * <element name="row" type="tns:Row" maxOccurs="unbounded"
+ * minOccurs="1"></element>
+ * </sequence>
+ * </complexType>
+ *
+ * <complexType name="Row">
+ * <sequence>
+ * <element name="key" type="base64Binary"></element>
+ * <element name="cell" type="tns:Cell"
+ * maxOccurs="unbounded" minOccurs="1"></element>
+ * </sequence>
+ * </complexType>
+ *
+ * <complexType name="Cell">
+ * <sequence>
+ * <element name="value" maxOccurs="1" minOccurs="1">
+ * <simpleType>
+ * <restriction base="base64Binary"/>
+ * </simpleType>
+ * </element>
+ * </sequence>
+ * <attribute name="column" type="base64Binary" />
+ * <attribute name="timestamp" type="int" />
+ * </complexType>
+ *
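+ * For example, an illustrative cell set with one row (key "row1") holding a
+ * single cell (column "cf:qual", value "data", all base64 encoded):
+ * <pre>
+ * <CellSet>
+ *   <Row key="cm93MQ==">
+ *     <Cell column="Y2Y6cXVhbA==">ZGF0YQ==</Cell>
+ *   </Row>
+ * </CellSet>
+ * </pre>
+ *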
+ */
+@XmlRootElement(name="CellSet")
+public class CellSetModel implements Serializable, ProtobufMessageHandler {
+
+ private static final long serialVersionUID = 1L;
+
+ private List<RowModel> rows;
+
+ /**
+ * Constructor
+ */
+ public CellSetModel() {
+ this.rows = new ArrayList<RowModel>();
+ }
+
+ /**
+ * @param rows the rows
+ */
+ public CellSetModel(List<RowModel> rows) {
+ super();
+ this.rows = rows;
+ }
+
+ /**
+ * Add a row to this cell set
+ * @param row the row
+ */
+ public void addRow(RowModel row) {
+ rows.add(row);
+ }
+
+ /**
+ * @return the rows
+ */
+ @XmlElement(name="Row")
+ public List<RowModel> getRows() {
+ return rows;
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ CellSet.Builder builder = CellSet.newBuilder();
+ for (RowModel row: getRows()) {
+ CellSet.Row.Builder rowBuilder = CellSet.Row.newBuilder();
+ rowBuilder.setKey(ByteString.copyFrom(row.getKey()));
+ for (CellModel cell: row.getCells()) {
+ Cell.Builder cellBuilder = Cell.newBuilder();
+ cellBuilder.setColumn(ByteString.copyFrom(cell.getColumn()));
+ cellBuilder.setData(ByteString.copyFrom(cell.getValue()));
+ if (cell.hasUserTimestamp()) {
+ cellBuilder.setTimestamp(cell.getTimestamp());
+ }
+ rowBuilder.addValues(cellBuilder);
+ }
+ builder.addRows(rowBuilder);
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ CellSet.Builder builder = CellSet.newBuilder();
+ builder.mergeFrom(message);
+ for (CellSet.Row row: builder.getRowsList()) {
+ RowModel rowModel = new RowModel(row.getKey().toByteArray());
+ for (Cell cell: row.getValuesList()) {
+ long timestamp = HConstants.LATEST_TIMESTAMP;
+ if (cell.hasTimestamp()) {
+ timestamp = cell.getTimestamp();
+ }
+ rowModel.addCell(
+ new CellModel(cell.getColumn().toByteArray(), timestamp,
+ cell.getData().toByteArray()));
+ }
+ addRow(rowModel);
+ }
+ return this;
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/ColumnSchemaModel.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/ColumnSchemaModel.java
new file mode 100644
index 0000000..00ec54d
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/ColumnSchemaModel.java
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAnyAttribute;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlType;
+import javax.xml.namespace.QName;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+
+/**
+ * Representation of a column family schema.
+ *
+ *
+ * <complexType name="ColumnSchema">
+ * <attribute name="name" type="string"></attribute>
+ * <anyAttribute></anyAttribute>
+ * </complexType>
+ *
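+ * For example (the attribute set shown is illustrative):
+ * <pre>
+ * <ColumnSchema name="info" VERSIONS="3" COMPRESSION="NONE"/>
+ * </pre>
+ *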
+ */
+@XmlRootElement(name="ColumnSchema")
+@XmlType(propOrder = {"name"})
+public class ColumnSchemaModel implements Serializable {
+ private static final long serialVersionUID = 1L;
+ private static QName BLOCKCACHE = new QName(HColumnDescriptor.BLOCKCACHE);
+ private static QName BLOCKSIZE = new QName(HColumnDescriptor.BLOCKSIZE);
+ private static QName BLOOMFILTER = new QName(HColumnDescriptor.BLOOMFILTER);
+ private static QName COMPRESSION = new QName(HColumnDescriptor.COMPRESSION);
+ private static QName IN_MEMORY = new QName(HConstants.IN_MEMORY);
+ private static QName TTL = new QName(HColumnDescriptor.TTL);
+ private static QName VERSIONS = new QName(HConstants.VERSIONS);
+
+ private String name;
+ private Map<QName,Object> attrs = new HashMap<QName,Object>();
+
+ /**
+ * Default constructor
+ */
+ public ColumnSchemaModel() {}
+
+ /**
+ * Add an attribute to the column family schema
+ * @param name the attribute name
+ * @param value the attribute value
+ */
+ public void addAttribute(String name, Object value) {
+ attrs.put(new QName(name), value);
+ }
+
+ /**
+ * @param name the attribute name
+ * @return the attribute value
+ */
+ public String getAttribute(String name) {
+ Object o = attrs.get(new QName(name));
+ return o != null ? o.toString(): null;
+ }
+
+ /**
+ * @return the column name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the map for holding unspecified (user) attributes
+ */
+ @XmlAnyAttribute
+ public Map<QName,Object> getAny() {
+ return attrs;
+ }
+
+ /**
+ * @param name the column name
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("{ NAME => '");
+ sb.append(name);
+ sb.append('\'');
+ for (Map.Entry<QName,Object> e: attrs.entrySet()) {
+ sb.append(", ");
+ sb.append(e.getKey().getLocalPart());
+ sb.append(" => '");
+ sb.append(e.getValue().toString());
+ sb.append('\'');
+ }
+ sb.append(" }");
+ return sb.toString();
+ }
+
+ // getters and setters for common schema attributes
+
+ // cannot be standard bean type getters and setters, otherwise this would
+ // confuse JAXB
+
+ /**
+ * @return true if the BLOCKCACHE attribute is present and true
+ */
+ public boolean __getBlockcache() {
+ Object o = attrs.get(BLOCKCACHE);
+ return o != null ?
+ Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE;
+ }
+
+ /**
+ * @return the value of the BLOCKSIZE attribute or its default if it is unset
+ */
+ public int __getBlocksize() {
+ Object o = attrs.get(BLOCKSIZE);
+ return o != null ?
+ Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE;
+ }
+
+ /**
+ * @return true if the BLOOMFILTER attribute is present and true
+ */
+ public boolean __getBloomfilter() {
+ Object o = attrs.get(BLOOMFILTER);
+ return o != null ?
+ Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOOMFILTER;
+ }
+
+ /**
+ * @return the value of the COMPRESSION attribute or its default if it is unset
+ */
+ public String __getCompression() {
+ Object o = attrs.get(COMPRESSION);
+ return o != null ? o.toString() : HColumnDescriptor.DEFAULT_COMPRESSION;
+ }
+
+ /**
+ * @return true if the IN_MEMORY attribute is present and true
+ */
+ public boolean __getInMemory() {
+ Object o = attrs.get(IN_MEMORY);
+ return o != null ?
+ Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY;
+ }
+
+ /**
+ * @return the value of the TTL attribute or its default if it is unset
+ */
+ public int __getTTL() {
+ Object o = attrs.get(TTL);
+ return o != null ?
+ Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_TTL;
+ }
+
+ /**
+ * @return the value of the VERSIONS attribute or its default if it is unset
+ */
+ public int __getVersions() {
+ Object o = attrs.get(VERSIONS);
+ return o != null ?
+ Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS;
+ }
+
+ /**
+ * @param value the desired value of the BLOCKSIZE attribute
+ */
+ public void __setBlocksize(int value) {
+ attrs.put(BLOCKSIZE, Integer.toString(value));
+ }
+
+ /**
+ * @param value the desired value of the BLOCKCACHE attribute
+ */
+ public void __setBlockcache(boolean value) {
+ attrs.put(BLOCKCACHE, Boolean.toString(value));
+ }
+
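+ /**
+ * @param value the desired value of the BLOOMFILTER attribute
+ */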
+ public void __setBloomfilter(boolean value) {
+ attrs.put(BLOOMFILTER, Boolean.toString(value));
+ }
+
+ /**
+ * @param value the desired value of the COMPRESSION attribute
+ */
+ public void __setCompression(String value) {
+ attrs.put(COMPRESSION, value);
+ }
+
+ /**
+ * @param value the desired value of the IN_MEMORY attribute
+ */
+ public void __setInMemory(boolean value) {
+ attrs.put(IN_MEMORY, Boolean.toString(value));
+ }
+
+ /**
+ * @param value the desired value of the TTL attribute
+ */
+ public void __setTTL(int value) {
+ attrs.put(TTL, Integer.toString(value));
+ }
+
+ /**
+ * @param value the desired value of the VERSIONS attribute
+ */
+ public void __setVersions(int value) {
+ attrs.put(VERSIONS, Integer.toString(value));
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/RowModel.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/RowModel.java
new file mode 100644
index 0000000..7fd2aab
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/RowModel.java
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
+
+/**
+ * Representation of a row. A row is a related set of cells, grouped by common
+ * row key. RowModels do not appear in results by themselves. They are always
+ * encapsulated within CellSetModels.
+ *
+ *
+ * <complexType name="Row">
+ * <sequence>
+ * <element name="key" type="base64Binary"></element>
+ * <element name="cell" type="tns:Cell"
+ * maxOccurs="unbounded" minOccurs="1"></element>
+ * </sequence>
+ * </complexType>
+ *
+ */
+@XmlRootElement(name="Row")
+public class RowModel implements ProtobufMessageHandler, Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private byte[] key;
+ private List<CellModel> cells = new ArrayList<CellModel>();
+
+ /**
+ * Default constructor
+ */
+ public RowModel() { }
+
+ /**
+ * Constructor
+ * @param key the row key
+ */
+ public RowModel(final String key) {
+ this(key.getBytes());
+ }
+
+ /**
+ * Constructor
+ * @param key the row key
+ */
+ public RowModel(final byte[] key) {
+ this.key = key;
+ cells = new ArrayList<CellModel>();
+ }
+
+ /**
+ * Constructor
+ * @param key the row key
+ * @param cells the cells
+ */
+ public RowModel(final String key, final List<CellModel> cells) {
+ this(key.getBytes(), cells);
+ }
+
+ /**
+ * Constructor
+ * @param key the row key
+ * @param cells the cells
+ */
+ public RowModel(final byte[] key, final List<CellModel> cells) {
+ this.key = key;
+ this.cells = cells;
+ }
+
+ /**
+ * Adds a cell to the list of cells for this row
+ * @param cell the cell
+ */
+ public void addCell(CellModel cell) {
+ cells.add(cell);
+ }
+
+ /**
+ * @return the row key
+ */
+ @XmlAttribute
+ public byte[] getKey() {
+ return key;
+ }
+
+ /**
+ * @param key the row key
+ */
+ public void setKey(byte[] key) {
+ this.key = key;
+ }
+
+ /**
+ * @return the cells
+ */
+ @XmlElement(name="Cell")
+ public List<CellModel> getCells() {
+ return cells;
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ // there is no standalone row protobuf message
+ throw new UnsupportedOperationException(
+ "no protobuf equivalent to RowModel");
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ // there is no standalone row protobuf message
+ throw new UnsupportedOperationException(
+ "no protobuf equivalent to RowModel");
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/ScannerModel.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/ScannerModel.java
new file mode 100644
index 0000000..b9501db
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/ScannerModel.java
@@ -0,0 +1,608 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
+import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.hbase.filter.RegexStringComparator;
+import org.apache.hadoop.hbase.filter.RowFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.filter.SkipFilter;
+import org.apache.hadoop.hbase.filter.SubstringComparator;
+import org.apache.hadoop.hbase.filter.ValueFilter;
+import org.apache.hadoop.hbase.filter.WhileMatchFilter;
+import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner;
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+import org.json.JSONStringer;
+
+import com.google.protobuf.ByteString;
+
+/**
+ * A representation of Scanner parameters.
+ *
+ *
+ * <complexType name="Scanner">
+ * <sequence>
+ * <element name="column" type="base64Binary" minOccurs="0" maxOccurs="unbounded"/>
+ * </sequence>
+ * <element name="filter" type="string" minOccurs="0" maxOccurs="1"></element>
+ * <attribute name="startRow" type="base64Binary"></attribute>
+ * <attribute name="endRow" type="base64Binary"></attribute>
+ * <attribute name="batch" type="int"></attribute>
+ * <attribute name="startTime" type="int"></attribute>
+ * <attribute name="endTime" type="int"></attribute>
+ * <attribute name="maxVersions" type="int"></attribute>
+ * </complexType>
+ *
+ *
+ */
+@XmlRootElement(name="Scanner")
+public class ScannerModel implements ProtobufMessageHandler, Serializable {
+
+ static enum FilterType {
+ ColumnCountGetFilter,
+ FilterList,
+ FirstKeyOnlyFilter,
+ InclusiveStopFilter,
+ PageFilter,
+ PrefixFilter,
+ QualifierFilter,
+ RowFilter,
+ SingleColumnValueFilter,
+ SkipFilter,
+ ValueFilter,
+ WhileMatchFilter
+ }
+
+ static enum ComparatorType {
+ BinaryComparator,
+ BinaryPrefixComparator,
+ RegexStringComparator,
+ SubstringComparator
+ }
+
+ private static final long serialVersionUID = 1L;
+
+ private byte[] startRow = HConstants.EMPTY_START_ROW;
+ private byte[] endRow = HConstants.EMPTY_END_ROW;
+ private List<byte[]> columns = new ArrayList<byte[]>();
+ private int batch = 1;
+ private long startTime = 0;
+ private long endTime = Long.MAX_VALUE;
+ private String filter;
+ private int maxVersions = 1;
+
+ /**
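+ * Builds a comparator from its JSON representation, for example a
+ * hypothetical substring comparator:
+ * <pre>
+ * { "type": "SubstringComparator", "value": "abc" }
+ * </pre>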
+ * @param o the JSON representation of the comparator
+ * @return the comparator
+ * @throws Exception
+ */
+ public static WritableByteArrayComparable
+ buildWritableByteArrayComparable(final JSONObject o) throws Exception {
+ String type = o.getString("type");
+ String value = o.getString("value");
+ WritableByteArrayComparable comparator;
+ switch (ComparatorType.valueOf(type)) {
+ case BinaryComparator: {
+ comparator = new BinaryComparator(Base64.decode(value));
+ } break;
+ case BinaryPrefixComparator: {
+ comparator = new BinaryPrefixComparator(Base64.decode(value));
+ } break;
+ case RegexStringComparator: {
+ comparator = new RegexStringComparator(value);
+ } break;
+ case SubstringComparator: {
+ comparator = new SubstringComparator(value);
+ } break;
+ default: {
+ throw new RuntimeException("unhandled comparator type: " + type);
+ }
+ }
+ return comparator;
+ }
+
+ /**
+ * @param o the JSON representation of the filter
+ * @return the filter built from the specification
+ * @throws Exception
+ */
+ public static Filter buildFilter(final JSONObject o) throws Exception {
+ String type = o.getString("type");
+ Filter filter;
+ switch (FilterType.valueOf(type)) {
+ case ColumnCountGetFilter: {
+ filter = new ColumnCountGetFilter(o.getInt("limit"));
+ } break;
+ case FilterList: {
+ JSONArray arr = o.getJSONArray("filters");
+ List<Filter> filters = new ArrayList<Filter>(arr.length());
+ for (int i = 0; i < arr.length(); i++) {
+ filters.add(buildFilter(arr.getJSONObject(i)));
+ }
+ filter = new FilterList(
+ FilterList.Operator.valueOf(o.getString("op")),
+ filters);
+ } break;
+ case FirstKeyOnlyFilter: {
+ filter = new FirstKeyOnlyFilter();
+ } break;
+ case InclusiveStopFilter: {
+ filter = new InclusiveStopFilter(Base64.decode(o.getString("value")));
+ } break;
+ case PageFilter: {
+ filter = new PageFilter(o.getLong("value"));
+ } break;
+ case PrefixFilter: {
+ filter = new PrefixFilter(Base64.decode(o.getString("value")));
+ } break;
+ case QualifierFilter: {
+ filter = new QualifierFilter(CompareOp.valueOf(o.getString("op")),
+ buildWritableByteArrayComparable(o.getJSONObject("comparator")));
+ } break;
+ case RowFilter: {
+ filter = new RowFilter(CompareOp.valueOf(o.getString("op")),
+ buildWritableByteArrayComparable(o.getJSONObject("comparator")));
+ } break;
+ case SingleColumnValueFilter: {
+ filter = new SingleColumnValueFilter(
+ Base64.decode(o.getString("family")),
+ o.has("qualifier") ? Base64.decode(o.getString("qualifier")) : null,
+ CompareOp.valueOf(o.getString("op")),
+ buildWritableByteArrayComparable(o.getJSONObject("comparator")));
+ if (o.has("ifMissing")) {
+ ((SingleColumnValueFilter)filter)
+ .setFilterIfMissing(o.getBoolean("ifMissing"));
+ }
+ if (o.has("latestVersion")) {
+ ((SingleColumnValueFilter)filter)
+ .setLatestVersionOnly(o.getBoolean("latestVersion"));
+ }
+ } break;
+ case SkipFilter: {
+ filter = new SkipFilter(buildFilter(o.getJSONObject("filter")));
+ } break;
+ case ValueFilter: {
+ filter = new ValueFilter(CompareOp.valueOf(o.getString("op")),
+ buildWritableByteArrayComparable(o.getJSONObject("comparator")));
+ } break;
+ case WhileMatchFilter: {
+ filter = new WhileMatchFilter(buildFilter(o.getJSONObject("filter")));
+ } break;
+ default: {
+ throw new RuntimeException("unhandled filter type: " + type);
+ }
+ }
+ return filter;
+ }
+
+ /**
+ * @param s the JSONStringer
+ * @param comparator the comparator
+ * @return the JSONStringer
+ * @throws Exception
+ */
+ public static JSONStringer stringifyComparator(final JSONStringer s,
+ final WritableByteArrayComparable comparator) throws Exception {
+ String typeName = comparator.getClass().getSimpleName();
+ ComparatorType type = ComparatorType.valueOf(typeName);
+ s.object();
+ s.key("type").value(typeName);
+ switch (type) {
+ case BinaryComparator:
+ case BinaryPrefixComparator:
+ s.key("value").value(Base64.encodeBytes(comparator.getValue()));
+ break;
+ case RegexStringComparator:
+ case SubstringComparator:
+ s.key("value").value(Bytes.toString(comparator.getValue()));
+ break;
+ default:
+ throw new RuntimeException("unhandled filter type: " + type);
+ }
+ s.endObject();
+ return s;
+ }
+
+ /**
+ * @param s the JSONStringer
+ * @param filter the filter
+ * @return the JSONStringer
+ * @throws Exception
+ */
+ public static JSONStringer stringifyFilter(final JSONStringer s,
+ final Filter filter) throws Exception {
+ String typeName = filter.getClass().getSimpleName();
+ FilterType type;
+ try {
+ type = FilterType.valueOf(typeName);
+ } catch (IllegalArgumentException e) {
+ throw new RuntimeException("filter type " + typeName + " not supported");
+ }
+ s.object();
+ s.key("type").value(typeName);
+ switch (type) {
+ case ColumnCountGetFilter:
+ s.key("limit").value(((ColumnCountGetFilter)filter).getLimit());
+ break;
+ case FilterList:
+ s.key("op").value(((FilterList)filter).getOperator().toString());
+ s.key("filters").array();
+ for (Filter child: ((FilterList)filter).getFilters()) {
+ stringifyFilter(s, child);
+ }
+ s.endArray();
+ break;
+ case FirstKeyOnlyFilter:
+ break;
+ case InclusiveStopFilter:
+ s.key("value").value(
+ Base64.encodeBytes(((InclusiveStopFilter)filter).getStopRowKey()));
+ break;
+ case PageFilter:
+ s.key("value").value(((PageFilter)filter).getPageSize());
+ break;
+ case PrefixFilter:
+ s.key("value")
+ .value(Base64.encodeBytes(((PrefixFilter)filter).getPrefix()));
+ break;
+ case QualifierFilter:
+ case RowFilter:
+ case ValueFilter:
+ s.key("op").value(((CompareFilter)filter).getOperator().toString());
+ s.key("comparator");
+ stringifyComparator(s, ((CompareFilter)filter).getComparator());
+ break;
+ case SingleColumnValueFilter: {
+ SingleColumnValueFilter scvf = (SingleColumnValueFilter) filter;
+ s.key("family").value(scvf.getFamily());
+ byte[] qualifier = scvf.getQualifier();
+ if (qualifier != null) {
+ s.key("qualifier").value(qualifier);
+ }
+ s.key("op").value(scvf.getOperator().toString());
+ s.key("comparator");
+ stringifyComparator(s, scvf.getComparator());
+ if (scvf.getFilterIfMissing()) {
+ s.key("ifMissing").value(true);
+ }
+ if (scvf.getLatestVersionOnly()) {
+ s.key("latestVersion").value(true);
+ }
+ } break;
+ case SkipFilter:
+ s.key("filter");
+ stringifyFilter(s, ((SkipFilter)filter).getFilter());
+ break;
+ case WhileMatchFilter:
+ s.key("filter");
+ stringifyFilter(s, ((WhileMatchFilter)filter).getFilter());
+ break;
+ }
+ s.endObject();
+ return s;
+ }
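+
+ // A minimal round-trip sketch, for illustration only: a filter serialized
+ // with stringifyFilter can be rebuilt with buildFilter, e.g.
+ // String spec = stringifyFilter(new JSONStringer(),
+ // new PageFilter(25)).toString();
+ // Filter rebuilt = buildFilter(new JSONObject(spec));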
+
+ /**
+ * @param scan the scan specification
+ * @return a ScannerModel representing the scan
+ * @throws Exception
+ */
+ public static ScannerModel fromScan(Scan scan) throws Exception {
+ ScannerModel model = new ScannerModel();
+ model.setStartRow(scan.getStartRow());
+ model.setEndRow(scan.getStopRow());
+ byte[][] families = scan.getFamilies();
+ if (families != null) {
+ for (byte[] column: families) {
+ model.addColumn(column);
+ }
+ }
+ model.setStartTime(scan.getTimeRange().getMin());
+ model.setEndTime(scan.getTimeRange().getMax());
+ model.setBatch(scan.getCaching());
+ model.setMaxVersions(scan.getMaxVersions());
+ Filter filter = scan.getFilter();
+ if (filter != null) {
+ model.setFilter(stringifyFilter(new JSONStringer(), filter).toString());
+ }
+ return model;
+ }
+
+ /**
+ * Default constructor
+ */
+ public ScannerModel() {}
+
+ /**
+ * Constructor
+ * @param startRow the start key of the row-range
+ * @param endRow the end key of the row-range
+ * @param columns the columns to scan
+ * @param batch the number of values to return in batch
+ * @param endTime the upper bound on timestamps of values of interest
+ * (values with timestamps later than this are excluded)
+ * @param maxVersions the maximum number of versions to return
+ * @param filter a filter specification
+ */
+ public ScannerModel(byte[] startRow, byte[] endRow, List<byte[]> columns,
+ int batch, long endTime, int maxVersions, String filter) {
+ super();
+ this.startRow = startRow;
+ this.endRow = endRow;
+ this.columns = columns;
+ this.batch = batch;
+ this.endTime = endTime;
+ this.maxVersions = maxVersions;
+ this.filter = filter;
+ }
+
+ /**
+ * Constructor
+ * @param startRow the start key of the row-range
+ * @param endRow the end key of the row-range
+ * @param columns the columns to scan
+ * @param batch the number of values to return in batch
+ * @param startTime the lower bound on timestamps of values of interest
+ * (values with timestamps earlier than this are excluded)
+ * @param endTime the upper bound on timestamps of values of interest
+ * (values with timestamps later than this are excluded)
+ * @param filter a filter specification
+ */
+ public ScannerModel(byte[] startRow, byte[] endRow, List<byte[]> columns,
+ int batch, long startTime, long endTime, String filter) {
+ super();
+ this.startRow = startRow;
+ this.endRow = endRow;
+ this.columns = columns;
+ this.batch = batch;
+ this.startTime = startTime;
+ this.endTime = endTime;
+ this.filter = filter;
+ }
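+
+ // Usage sketch only (the row keys and batch size here are hypothetical):
+ // ScannerModel model = new ScannerModel(Bytes.toBytes("aaa"),
+ // Bytes.toBytes("zzz"), new ArrayList<byte[]>(), 100, 0, Long.MAX_VALUE,
+ // null);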
+
+ /**
+ * Add a column to the column set
+ * @param column the column name, as <column>(:<qualifier>)?
+ */
+ public void addColumn(byte[] column) {
+ columns.add(column);
+ }
+
+ /**
+ * @return true if a start row was specified
+ */
+ public boolean hasStartRow() {
+ return !Bytes.equals(startRow, HConstants.EMPTY_START_ROW);
+ }
+
+ /**
+ * @return start row
+ */
+ @XmlAttribute
+ public byte[] getStartRow() {
+ return startRow;
+ }
+
+ /**
+ * @return true if an end row was specified
+ */
+ public boolean hasEndRow() {
+ return !Bytes.equals(endRow, HConstants.EMPTY_END_ROW);
+ }
+
+ /**
+ * @return end row
+ */
+ @XmlAttribute
+ public byte[] getEndRow() {
+ return endRow;
+ }
+
+ /**
+ * @return list of columns of interest in column:qualifier format, or empty for all
+ */
+ @XmlElement(name="column")
+ public List<byte[]> getColumns() {
+ return columns;
+ }
+
+ /**
+ * @return the number of cells to return in batch
+ */
+ @XmlAttribute
+ public int getBatch() {
+ return batch;
+ }
+
+ /**
+ * @return the lower bound on timestamps of items of interest
+ */
+ @XmlAttribute
+ public long getStartTime() {
+ return startTime;
+ }
+
+ /**
+ * @return the upper bound on timestamps of items of interest
+ */
+ @XmlAttribute
+ public long getEndTime() {
+ return endTime;
+ }
+
+ /**
+ * @return maximum number of versions to return
+ */
+ @XmlAttribute
+ public int getMaxVersions() {
+ return maxVersions;
+ }
+
+ /**
+ * @return the filter specification
+ */
+ @XmlElement
+ public String getFilter() {
+ return filter;
+ }
+
+ /**
+ * @param startRow start row
+ */
+ public void setStartRow(byte[] startRow) {
+ this.startRow = startRow;
+ }
+
+ /**
+ * @param endRow end row
+ */
+ public void setEndRow(byte[] endRow) {
+ this.endRow = endRow;
+ }
+
+ /**
+ * @param columns list of columns of interest in column:qualifier format, or empty for all
+ */
+ public void setColumns(List<byte[]> columns) {
+ this.columns = columns;
+ }
+
+ /**
+ * @param batch the number of cells to return in batch
+ */
+ public void setBatch(int batch) {
+ this.batch = batch;
+ }
+
+ /**
+ * @param maxVersions maximum number of versions to return
+ */
+ public void setMaxVersions(int maxVersions) {
+ this.maxVersions = maxVersions;
+ }
+
+ /**
+ * @param startTime the lower bound on timestamps of values of interest
+ */
+ public void setStartTime(long startTime) {
+ this.startTime = startTime;
+ }
+
+ /**
+ * @param endTime the upper bound on timestamps of values of interest
+ */
+ public void setEndTime(long endTime) {
+ this.endTime = endTime;
+ }
+
+ /**
+ * @param filter the filter specification
+ */
+ public void setFilter(String filter) {
+ this.filter = filter;
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ Scanner.Builder builder = Scanner.newBuilder();
+ if (!Bytes.equals(startRow, HConstants.EMPTY_START_ROW)) {
+ builder.setStartRow(ByteString.copyFrom(startRow));
+ }
+ if (!Bytes.equals(endRow, HConstants.EMPTY_END_ROW)) {
+ builder.setEndRow(ByteString.copyFrom(endRow));
+ }
+ for (byte[] column: columns) {
+ builder.addColumns(ByteString.copyFrom(column));
+ }
+ builder.setBatch(batch);
+ if (startTime != 0) {
+ builder.setStartTime(startTime);
+ }
+ if (endTime != 0) {
+ builder.setEndTime(endTime);
+ }
+ builder.setMaxVersions(maxVersions);
+ if (filter != null) {
+ builder.setFilter(filter);
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ Scanner.Builder builder = Scanner.newBuilder();
+ builder.mergeFrom(message);
+ if (builder.hasStartRow()) {
+ startRow = builder.getStartRow().toByteArray();
+ }
+ if (builder.hasEndRow()) {
+ endRow = builder.getEndRow().toByteArray();
+ }
+ for (ByteString column: builder.getColumnsList()) {
+ addColumn(column.toByteArray());
+ }
+ if (builder.hasBatch()) {
+ batch = builder.getBatch();
+ }
+ if (builder.hasStartTime()) {
+ startTime = builder.getStartTime();
+ }
+ if (builder.hasEndTime()) {
+ endTime = builder.getEndTime();
+ }
+ if (builder.hasMaxVersions()) {
+ maxVersions = builder.getMaxVersions();
+ }
+ if (builder.hasFilter()) {
+ filter = builder.getFilter();
+ }
+ return this;
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/StorageClusterStatusModel.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/StorageClusterStatusModel.java
new file mode 100644
index 0000000..f9cf42e
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/StorageClusterStatusModel.java
@@ -0,0 +1,620 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlElementWrapper;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.google.protobuf.ByteString;
+
+/**
+ * Representation of the status of a storage cluster:
+ *
+ *
+ * <ul>
+ * <li>regions: the total number of regions served by the cluster</li>
+ * <li>requests: the total number of requests per second handled by the
+ * cluster in the last reporting interval</li>
+ * <li>averageLoad: the average load of the region servers in the cluster</li>
+ * <li>liveNodes: detailed status of the live region servers</li>
+ * <li>deadNodes: the names of region servers declared dead</li>
+ * </ul>
+ *
+ *
+ *
+ * <complexType name="StorageClusterStatus">
+ * <sequence>
+ * <element name="liveNode" type="tns:Node"
+ * maxOccurs="unbounded" minOccurs="0">
+ * </element>
+ * <element name="deadNode" type="string" maxOccurs="unbounded"
+ * minOccurs="0">
+ * </element>
+ * </sequence>
+ * <attribute name="regions" type="int"></attribute>
+ * <attribute name="requests" type="int"></attribute>
+ * <attribute name="averageLoad" type="float"></attribute>
+ * </complexType>
+ *
+ * <complexType name="Node">
+ * <sequence>
+ * <element name="region" type="tns:Region"
+ * maxOccurs="unbounded" minOccurs="0"></element>
+ * </sequence>
+ * <attribute name="name" type="string"></attribute>
+ * <attribute name="startCode" type="int"></attribute>
+ * <attribute name="requests" type="int"></attribute>
+ * <attribute name="heapSizeMB" type="int"></attribute>
+ * <attribute name="maxHeapSizeMB" type="int"></attribute>
+ * </complexType>
+ *
+ * <complexType name="Region">
+ * <attribute name="name" type="base64Binary"></attribute>
+ * <attribute name="stores" type="int"></attribute>
+ * <attribute name="storefiles" type="int"></attribute>
+ * <attribute name="storefileSizeMB" type="int"></attribute>
+ * <attribute name="memstoreSizeMB" type="int"></attribute>
+ * <attribute name="storefileIndexSizeMB" type="int"></attribute>
+ * </complexType>
+ *
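+ * A minimal usage sketch (the host name and figures are hypothetical):
+ *
+ * StorageClusterStatusModel status = new StorageClusterStatusModel();
+ * status.setRegions(1);
+ * status.setRequests(0);
+ * status.setAverageLoad(1.0);
+ * status.addLiveNode("node1.example.com:60030", 1263186250096L, 128, 1024)
+ * .addRegion(Bytes.toBytes("-ROOT-,,0"), 1, 1, 0, 1, 0);
+ *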
+ */
+@XmlRootElement(name="ClusterStatus")
+public class StorageClusterStatusModel
+ implements Serializable, ProtobufMessageHandler {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Represents a region server.
+ */
+ public static class Node {
+
+ /**
+ * Represents a region hosted on a region server.
+ */
+ public static class Region {
+ private byte[] name;
+ private int stores;
+ private int storefiles;
+ private int storefileSizeMB;
+ private int memstoreSizeMB;
+ private int storefileIndexSizeMB;
+
+ /**
+ * Default constructor
+ */
+ public Region() {}
+
+ /**
+ * Constructor
+ * @param name the region name
+ */
+ public Region(byte[] name) {
+ this.name = name;
+ }
+
+ /**
+ * Constructor
+ * @param name the region name
+ * @param stores the number of stores
+ * @param storefiles the number of store files
+ * @param storefileSizeMB total size of store files, in MB
+ * @param memstoreSizeMB total size of memstore, in MB
+ * @param storefileIndexSizeMB total size of store file indexes, in MB
+ */
+ public Region(byte[] name, int stores, int storefiles,
+ int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB) {
+ this.name = name;
+ this.stores = stores;
+ this.storefiles = storefiles;
+ this.storefileSizeMB = storefileSizeMB;
+ this.memstoreSizeMB = memstoreSizeMB;
+ this.storefileIndexSizeMB = storefileIndexSizeMB;
+ }
+
+ /**
+ * @return the region name
+ */
+ @XmlAttribute
+ public byte[] getName() {
+ return name;
+ }
+
+ /**
+ * @return the number of stores
+ */
+ @XmlAttribute
+ public int getStores() {
+ return stores;
+ }
+
+ /**
+ * @return the number of store files
+ */
+ @XmlAttribute
+ public int getStorefiles() {
+ return storefiles;
+ }
+
+ /**
+ * @return the total size of store files, in MB
+ */
+ @XmlAttribute
+ public int getStorefileSizeMB() {
+ return storefileSizeMB;
+ }
+
+ /**
+ * @return memstore size, in MB
+ */
+ @XmlAttribute
+ public int getMemstoreSizeMB() {
+ return memstoreSizeMB;
+ }
+
+ /**
+ * @return the total size of store file indexes, in MB
+ */
+ @XmlAttribute
+ public int getStorefileIndexSizeMB() {
+ return storefileIndexSizeMB;
+ }
+
+ /**
+ * @param name the region name
+ */
+ public void setName(byte[] name) {
+ this.name = name;
+ }
+
+ /**
+ * @param stores the number of stores
+ */
+ public void setStores(int stores) {
+ this.stores = stores;
+ }
+
+ /**
+ * @param storefiles the number of store files
+ */
+ public void setStorefiles(int storefiles) {
+ this.storefiles = storefiles;
+ }
+
+ /**
+ * @param storefileSizeMB total size of store files, in MB
+ */
+ public void setStorefileSizeMB(int storefileSizeMB) {
+ this.storefileSizeMB = storefileSizeMB;
+ }
+
+ /**
+ * @param memstoreSizeMB memstore size, in MB
+ */
+ public void setMemstoreSizeMB(int memstoreSizeMB) {
+ this.memstoreSizeMB = memstoreSizeMB;
+ }
+
+ /**
+ * @param storefileIndexSizeMB total size of store file indexes, in MB
+ */
+ public void setStorefileIndexSizeMB(int storefileIndexSizeMB) {
+ this.storefileIndexSizeMB = storefileIndexSizeMB;
+ }
+ }
+
+ private String name;
+ private long startCode;
+ private int requests;
+ private int heapSizeMB;
+ private int maxHeapSizeMB;
+ private List<Region> regions = new ArrayList<Region>();
+
+ /**
+ * Add a region to the list of regions served by this node
+ * @param name the region name
+ * @param stores the number of stores
+ * @param storefiles the number of store files
+ * @param storefileSizeMB total size of store files, in MB
+ * @param memstoreSizeMB total size of memstore, in MB
+ * @param storefileIndexSizeMB total size of store file indexes, in MB
+ */
+ public void addRegion(byte[] name, int stores, int storefiles,
+ int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB) {
+ regions.add(new Region(name, stores, storefiles, storefileSizeMB,
+ memstoreSizeMB, storefileIndexSizeMB));
+ }
+
+ /**
+ * @param index the index
+ * @return the region model at the given index
+ */
+ public Region getRegion(int index) {
+ return regions.get(index);
+ }
+
+ /**
+ * Default constructor
+ */
+ public Node() {}
+
+ /**
+ * Constructor
+ * @param name the region server name
+ * @param startCode the region server's start code
+ */
+ public Node(String name, long startCode) {
+ this.name = name;
+ this.startCode = startCode;
+ }
+
+ /**
+ * @return the region server's name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the region server's start code
+ */
+ @XmlAttribute
+ public long getStartCode() {
+ return startCode;
+ }
+
+ /**
+ * @return the current heap size, in MB
+ */
+ @XmlAttribute
+ public int getHeapSizeMB() {
+ return heapSizeMB;
+ }
+
+ /**
+ * @return the maximum heap size, in MB
+ */
+ @XmlAttribute
+ public int getMaxHeapSizeMB() {
+ return maxHeapSizeMB;
+ }
+
+ /**
+ * @return the list of regions served by the region server
+ */
+ @XmlElement(name="Region")
+ public List<Region> getRegions() {
+ return regions;
+ }
+
+ /**
+ * @return the number of requests per second processed by the region server
+ */
+ @XmlAttribute
+ public int getRequests() {
+ return requests;
+ }
+
+ /**
+ * @param name the region server's hostname
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /**
+ * @param startCode the region server's start code
+ */
+ public void setStartCode(long startCode) {
+ this.startCode = startCode;
+ }
+
+ /**
+ * @param heapSizeMB the current heap size, in MB
+ */
+ public void setHeapSizeMB(int heapSizeMB) {
+ this.heapSizeMB = heapSizeMB;
+ }
+
+ /**
+ * @param maxHeapSizeMB the maximum heap size, in MB
+ */
+ public void setMaxHeapSizeMB(int maxHeapSizeMB) {
+ this.maxHeapSizeMB = maxHeapSizeMB;
+ }
+
+ /**
+ * @param regions a list of regions served by the region server
+ */
+ public void setRegions(List<Region> regions) {
+ this.regions = regions;
+ }
+
+ /**
+ * @param requests the number of requests per second processed by the
+ * region server
+ */
+ public void setRequests(int requests) {
+ this.requests = requests;
+ }
+ }
+
+ private List<Node> liveNodes = new ArrayList<Node>();
+ private List<String> deadNodes = new ArrayList<String>();
+ private int regions;
+ private int requests;
+ private double averageLoad;
+
+ /**
+ * Add a live node to the cluster representation.
+ * @param name the region server name
+ * @param startCode the region server's start code
+ * @param heapSizeMB the current heap size, in MB
+ * @param maxHeapSizeMB the maximum heap size, in MB
+ */
+ public Node addLiveNode(String name, long startCode, int heapSizeMB,
+ int maxHeapSizeMB) {
+ Node node = new Node(name, startCode);
+ node.setHeapSizeMB(heapSizeMB);
+ node.setMaxHeapSizeMB(maxHeapSizeMB);
+ liveNodes.add(node);
+ return node;
+ }
+
+ /**
+ * @param index the index
+ * @return the region server model
+ */
+ public Node getLiveNode(int index) {
+ return liveNodes.get(index);
+ }
+
+ /**
+ * Add a dead node to the cluster representation.
+ * @param node the dead region server's name
+ */
+ public void addDeadNode(String node) {
+ deadNodes.add(node);
+ }
+
+ /**
+ * @param index the index
+ * @return the dead region server's name
+ */
+ public String getDeadNode(int index) {
+ return deadNodes.get(index);
+ }
+
+ /**
+ * Default constructor
+ */
+ public StorageClusterStatusModel() {}
+
+ /**
+ * @return the list of live nodes
+ */
+ @XmlElement(name="Node")
+ @XmlElementWrapper(name="LiveNodes")
+ public List<Node> getLiveNodes() {
+ return liveNodes;
+ }
+
+ /**
+ * @return the list of dead nodes
+ */
+ @XmlElement(name="Node")
+ @XmlElementWrapper(name="DeadNodes")
+ public List<String> getDeadNodes() {
+ return deadNodes;
+ }
+
+ /**
+ * @return the total number of regions served by the cluster
+ */
+ @XmlAttribute
+ public int getRegions() {
+ return regions;
+ }
+
+ /**
+ * @return the total number of requests per second handled by the cluster in
+ * the last reporting interval
+ */
+ @XmlAttribute
+ public int getRequests() {
+ return requests;
+ }
+
+ /**
+ * @return the average load of the region servers in the cluster
+ */
+ @XmlAttribute
+ public double getAverageLoad() {
+ return averageLoad;
+ }
+
+ /**
+ * @param nodes the list of live node models
+ */
+ public void setLiveNodes(List<Node> nodes) {
+ this.liveNodes = nodes;
+ }
+
+ /**
+ * @param nodes the list of dead node names
+ */
+ public void setDeadNodes(List<String> nodes) {
+ this.deadNodes = nodes;
+ }
+
+ /**
+ * @param regions the total number of regions served by the cluster
+ */
+ public void setRegions(int regions) {
+ this.regions = regions;
+ }
+
+ /**
+ * @param requests the total number of requests per second handled by the
+ * cluster
+ */
+ public void setRequests(int requests) {
+ this.requests = requests;
+ }
+
+ /**
+ * @param averageLoad the average load of region servers in the cluster
+ */
+ public void setAverageLoad(double averageLoad) {
+ this.averageLoad = averageLoad;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(String.format("%d live servers, %d dead servers, " +
+ "%.4f average load\n\n", liveNodes.size(), deadNodes.size(),
+ averageLoad));
+ if (!liveNodes.isEmpty()) {
+ sb.append(liveNodes.size());
+ sb.append(" live servers\n");
+ for (Node node: liveNodes) {
+ sb.append(" ");
+ sb.append(node.name);
+ sb.append(' ');
+ sb.append(node.startCode);
+ sb.append("\n requests=");
+ sb.append(node.requests);
+ sb.append(", regions=");
+ sb.append(node.regions.size());
+ sb.append("\n heapSizeMB=");
+ sb.append(node.heapSizeMB);
+ sb.append("\n maxHeapSizeMB=");
+ sb.append(node.maxHeapSizeMB);
+ sb.append("\n\n");
+ for (Node.Region region: node.regions) {
+ sb.append(" ");
+ sb.append(Bytes.toString(region.name));
+ sb.append("\n stores=");
+ sb.append(region.stores);
+ sb.append("\n storefiless=");
+ sb.append(region.storefiles);
+ sb.append("\n storefileSizeMB=");
+ sb.append(region.storefileSizeMB);
+ sb.append("\n memstoreSizeMB=");
+ sb.append(region.memstoreSizeMB);
+ sb.append("\n storefileIndexSizeMB=");
+ sb.append(region.storefileIndexSizeMB);
+ sb.append('\n');
+ }
+ sb.append('\n');
+ }
+ }
+ if (!deadNodes.isEmpty()) {
+ sb.append('\n');
+ sb.append(deadNodes.size());
+ sb.append(" dead servers\n");
+ for (String node: deadNodes) {
+ sb.append(" ");
+ sb.append(node);
+ sb.append('\n');
+ }
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
+ builder.setRegions(regions);
+ builder.setRequests(requests);
+ builder.setAverageLoad(averageLoad);
+ for (Node node: liveNodes) {
+ StorageClusterStatus.Node.Builder nodeBuilder =
+ StorageClusterStatus.Node.newBuilder();
+ nodeBuilder.setName(node.name);
+ nodeBuilder.setStartCode(node.startCode);
+ nodeBuilder.setRequests(node.requests);
+ nodeBuilder.setHeapSizeMB(node.heapSizeMB);
+ nodeBuilder.setMaxHeapSizeMB(node.maxHeapSizeMB);
+ for (Node.Region region: node.regions) {
+ StorageClusterStatus.Region.Builder regionBuilder =
+ StorageClusterStatus.Region.newBuilder();
+ regionBuilder.setName(ByteString.copyFrom(region.name));
+ regionBuilder.setStores(region.stores);
+ regionBuilder.setStorefiles(region.storefiles);
+ regionBuilder.setStorefileSizeMB(region.storefileSizeMB);
+ regionBuilder.setMemstoreSizeMB(region.memstoreSizeMB);
+ regionBuilder.setStorefileIndexSizeMB(region.storefileIndexSizeMB);
+ nodeBuilder.addRegions(regionBuilder);
+ }
+ builder.addLiveNodes(nodeBuilder);
+ }
+ for (String node: deadNodes) {
+ builder.addDeadNodes(node);
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ StorageClusterStatus.Builder builder = StorageClusterStatus.newBuilder();
+ builder.mergeFrom(message);
+ if (builder.hasRegions()) {
+ regions = builder.getRegions();
+ }
+ if (builder.hasRequests()) {
+ requests = builder.getRequests();
+ }
+ if (builder.hasAverageLoad()) {
+ averageLoad = builder.getAverageLoad();
+ }
+ for (StorageClusterStatus.Node node: builder.getLiveNodesList()) {
+ long startCode = node.hasStartCode() ? node.getStartCode() : -1;
+ StorageClusterStatusModel.Node nodeModel =
+ addLiveNode(node.getName(), startCode, node.getHeapSizeMB(),
+ node.getMaxHeapSizeMB());
+ int requests = node.hasRequests() ? node.getRequests() : 0;
+ nodeModel.setRequests(requests);
+ for (StorageClusterStatus.Region region: node.getRegionsList()) {
+ nodeModel.addRegion(
+ region.getName().toByteArray(),
+ region.getStores(),
+ region.getStorefiles(),
+ region.getStorefileSizeMB(),
+ region.getMemstoreSizeMB(),
+ region.getStorefileIndexSizeMB());
+ }
+ }
+ for (String node: builder.getDeadNodesList()) {
+ addDeadNode(node);
+ }
+ return this;
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/StorageClusterVersionModel.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/StorageClusterVersionModel.java
new file mode 100644
index 0000000..2cd7c1c
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/StorageClusterVersionModel.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlValue;
+
+/**
+ * Simple representation of the version of the storage cluster
+ *
+ *
+ * <complexType name="StorageClusterVersion">
+ * <attribute name="version" type="string"></attribute>
+ * </complexType>
+ *
+ */
+@XmlRootElement(name="ClusterVersion")
+public class StorageClusterVersionModel implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private String version;
+
+ /**
+ * @return the storage cluster version
+ */
+ @XmlValue
+ public String getVersion() {
+ return version;
+ }
+
+ /**
+ * @param version the storage cluster version
+ */
+ public void setVersion(String version) {
+ this.version = version;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return version;
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableInfoModel.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableInfoModel.java
new file mode 100644
index 0000000..e6a1736
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableInfoModel.java
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlType;
+
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo;
+
+import com.google.protobuf.ByteString;
+
+/**
+ * Representation of a list of table regions.
+ *
+ *
+ * <complexType name="TableInfo">
+ * <sequence>
+ * <element name="region" type="tns:TableRegion"
+ * maxOccurs="unbounded" minOccurs="1"></element>
+ * </sequence>
+ * <attribute name="name" type="string"></attribute>
+ * </complexType>
+ *
+ */
+@XmlRootElement(name="TableInfo")
+@XmlType(propOrder = {"name","regions"})
+public class TableInfoModel implements Serializable, ProtobufMessageHandler {
+ private static final long serialVersionUID = 1L;
+
+ private String name;
+ private List<TableRegionModel> regions = new ArrayList<TableRegionModel>();
+
+ /**
+ * Default constructor
+ */
+ public TableInfoModel() {}
+
+ /**
+ * Constructor
+ * @param name the table name
+ */
+ public TableInfoModel(String name) {
+ this.name = name;
+ }
+
+ /**
+ * Add a region model to the list
+ * @param region the region
+ */
+ public void add(TableRegionModel region) {
+ regions.add(region);
+ }
+
+ /**
+ * @param index the index
+ * @return the region model
+ */
+ public TableRegionModel get(int index) {
+ return regions.get(index);
+ }
+
+ /**
+ * @return the table name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the regions
+ */
+ @XmlElement(name="Region")
+ public List<TableRegionModel> getRegions() {
+ return regions;
+ }
+
+ /**
+ * @param name the table name
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /**
+ * @param regions the regions to set
+ */
+ public void setRegions(List<TableRegionModel> regions) {
+ this.regions = regions;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for(TableRegionModel aRegion : regions) {
+ sb.append(aRegion.toString());
+ sb.append('\n');
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ TableInfo.Builder builder = TableInfo.newBuilder();
+ builder.setName(name);
+ for (TableRegionModel aRegion: regions) {
+ TableInfo.Region.Builder regionBuilder = TableInfo.Region.newBuilder();
+ regionBuilder.setName(aRegion.getName());
+ regionBuilder.setId(aRegion.getId());
+ regionBuilder.setStartKey(ByteString.copyFrom(aRegion.getStartKey()));
+ regionBuilder.setEndKey(ByteString.copyFrom(aRegion.getEndKey()));
+ regionBuilder.setLocation(aRegion.getLocation());
+ builder.addRegions(regionBuilder);
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ TableInfo.Builder builder = TableInfo.newBuilder();
+ builder.mergeFrom(message);
+ setName(builder.getName());
+ for (TableInfo.Region region: builder.getRegionsList()) {
+ add(new TableRegionModel(builder.getName(), region.getId(),
+ region.getStartKey().toByteArray(),
+ region.getEndKey().toByteArray(),
+ region.getLocation()));
+ }
+ return this;
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableListModel.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableListModel.java
new file mode 100644
index 0000000..1734cfc
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableListModel.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.bind.annotation.XmlElementRef;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList;
+
+/**
+ * Simple representation of a list of table names.
+ */
+@XmlRootElement(name="TableList")
+public class TableListModel implements Serializable, ProtobufMessageHandler {
+
+ private static final long serialVersionUID = 1L;
+
+ private List<TableModel> tables = new ArrayList<TableModel>();
+
+ /**
+ * Default constructor
+ */
+ public TableListModel() {}
+
+ /**
+ * Add the table name model to the list
+ * @param table the table model
+ */
+ public void add(TableModel table) {
+ tables.add(table);
+ }
+
+ /**
+ * @param index the index
+ * @return the table model
+ */
+ public TableModel get(int index) {
+ return tables.get(index);
+ }
+
+ /**
+ * @return the tables
+ */
+ @XmlElementRef(name="table")
+ public List<TableModel> getTables() {
+ return tables;
+ }
+
+ /**
+ * @param tables the tables to set
+ */
+ public void setTables(List<TableModel> tables) {
+ this.tables = tables;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for(TableModel aTable : tables) {
+ sb.append(aTable.toString());
+ sb.append('\n');
+ }
+ return sb.toString();
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ TableList.Builder builder = TableList.newBuilder();
+ for (TableModel aTable : tables) {
+ builder.addName(aTable.getName());
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ TableList.Builder builder = TableList.newBuilder();
+ builder.mergeFrom(message);
+ for (String table: builder.getNameList()) {
+ this.add(new TableModel(table));
+ }
+ return this;
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableModel.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableModel.java
new file mode 100644
index 0000000..b185b2e
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableModel.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * Simple representation of a table name.
+ *
+ *
+ * <complexType name="Table">
+ * <sequence>
+ * <element name="name" type="string"></element>
+ * </sequence>
+ * </complexType>
+ *
+ */
+@XmlRootElement(name="table")
+public class TableModel implements Serializable {
+
+ private static final long serialVersionUID = 1L;
+
+ private String name;
+
+ /**
+ * Default constructor
+ */
+ public TableModel() {}
+
+ /**
+ * Constructor
+ * @param name the table name
+ */
+ public TableModel(String name) {
+ super();
+ this.name = name;
+ }
+
+ /**
+ * @return the name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @param name the name to set
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return this.name;
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableRegionModel.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableRegionModel.java
new file mode 100644
index 0000000..9d72107
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableRegionModel.java
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlType;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Representation of a region of a table and its current location on the
+ * storage cluster.
+ *
+ *
+ * <complexType name="TableRegion">
+ * <attribute name="name" type="string"></attribute>
+ * <attribute name="id" type="int"></attribute>
+ * <attribute name="startKey" type="base64Binary"></attribute>
+ * <attribute name="endKey" type="base64Binary"></attribute>
+ * <attribute name="location" type="string"></attribute>
+ * </complexType>
+ *
+ */
+@XmlRootElement(name="Region")
+@XmlType(propOrder = {"name","id","startKey","endKey","location"})
+public class TableRegionModel implements Serializable {
+
+ private static final long serialVersionUID = 1L;
+
+ private String table;
+ private long id;
+ private byte[] startKey;
+ private byte[] endKey;
+ private String location;
+
+ /**
+ * Constructor
+ */
+ public TableRegionModel() {}
+
+ /**
+ * Constructor
+ * @param table the table name
+ * @param id the encoded id of the region
+ * @param startKey the start key of the region
+ * @param endKey the end key of the region
+ * @param location the name and port of the region server hosting the region
+ */
+ public TableRegionModel(String table, long id, byte[] startKey,
+ byte[] endKey, String location) {
+ this.table = table;
+ this.id = id;
+ this.startKey = startKey;
+ this.endKey = endKey;
+ this.location = location;
+ }
+
+ /**
+ * @return the region name
+ */
+ @XmlAttribute
+ public String getName() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(table);
+ sb.append(',');
+ sb.append(Bytes.toString(startKey));
+ sb.append(',');
+ sb.append(id);
+ return sb.toString();
+ }
+
+ /**
+ * @return the encoded region id
+ */
+ @XmlAttribute
+ public long getId() {
+ return id;
+ }
+
+ /**
+ * @return the start key
+ */
+ @XmlAttribute
+ public byte[] getStartKey() {
+ return startKey;
+ }
+
+ /**
+ * @return the end key
+ */
+ @XmlAttribute
+ public byte[] getEndKey() {
+ return endKey;
+ }
+
+ /**
+ * @return the name and port of the region server hosting the region
+ */
+ @XmlAttribute
+ public String getLocation() {
+ return location;
+ }
+
+ /**
+ * @param name region printable name
+ */
+ public void setName(String name) {
+ String[] split = name.split(",");
+ table = split[0];
+ startKey = Bytes.toBytes(split[1]);
+ id = Long.parseLong(split[2]);
+ }
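+
+ // For illustration only: a hypothetical printable name such as
+ // "mytable,startrow,1262332800000" is split by setName above into
+ // table ("mytable"), startKey ("startrow") and id (1262332800000).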
+
+ /**
+ * @param id the region's encoded id
+ */
+ public void setId(long id) {
+ this.id = id;
+ }
+
+ /**
+ * @param startKey the start key
+ */
+ public void setStartKey(byte[] startKey) {
+ this.startKey = startKey;
+ }
+
+ /**
+ * @param endKey the end key
+ */
+ public void setEndKey(byte[] endKey) {
+ this.endKey = endKey;
+ }
+
+ /**
+ * @param location the name and port of the region server hosting the region
+ */
+ public void setLocation(String location) {
+ this.location = location;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(getName());
+ sb.append(" [\n id=");
+ sb.append(id);
+ sb.append("\n startKey='");
+ sb.append(Bytes.toString(startKey));
+ sb.append("'\n endKey='");
+ sb.append(Bytes.toString(endKey));
+ sb.append("'\n location='");
+ sb.append(location);
+ sb.append("'\n]\n");
+ return sb.toString();
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java
new file mode 100644
index 0000000..c1f352e
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/TableSchemaModel.java
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import javax.xml.bind.annotation.XmlAnyAttribute;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlType;
+import javax.xml.namespace.QName;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema;
+
+/**
+ * A representation of HBase table descriptors.
+ *
+ *
+ * <complexType name="TableSchema">
+ * <sequence>
+ * <element name="column" type="tns:ColumnSchema"
+ * maxOccurs="unbounded" minOccurs="1"></element>
+ * </sequence>
+ * <attribute name="name" type="string"></attribute>
+ * <anyAttribute></anyAttribute>
+ * </complexType>
+ *
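+ * A minimal usage sketch (the table and family names are hypothetical):
+ *
+ * TableSchemaModel schema = new TableSchemaModel();
+ * schema.setName("example");
+ * schema.__setReadOnly(false);
+ * ColumnSchemaModel family = new ColumnSchemaModel();
+ * family.setName("cf");
+ * schema.addColumnFamily(family);
+ * byte[] pb = schema.createProtobufOutput();
+ *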
+ */
+@XmlRootElement(name="TableSchema")
+@XmlType(propOrder = {"name","columns"})
+public class TableSchemaModel implements Serializable, ProtobufMessageHandler {
+ private static final long serialVersionUID = 1L;
+ private static final QName IS_META = new QName(HTableDescriptor.IS_META);
+ private static final QName IS_ROOT = new QName(HTableDescriptor.IS_ROOT);
+ private static final QName READONLY = new QName(HTableDescriptor.READONLY);
+ private static final QName TTL = new QName(HColumnDescriptor.TTL);
+ private static final QName VERSIONS = new QName(HConstants.VERSIONS);
+ private static final QName COMPRESSION =
+ new QName(HColumnDescriptor.COMPRESSION);
+
+ private String name;
+ private Map<QName,Object> attrs = new HashMap<QName,Object>();
+ private List<ColumnSchemaModel> columns = new ArrayList<ColumnSchemaModel>();
+
+ /**
+ * Default constructor.
+ */
+ public TableSchemaModel() {}
+
+ /**
+ * Add an attribute to the table descriptor
+ * @param name attribute name
+ * @param value attribute value
+ */
+ public void addAttribute(String name, Object value) {
+ attrs.put(new QName(name), value);
+ }
+
+ /**
+ * Return a table descriptor value as a string. Calls toString() on the
+ * object stored in the descriptor value map.
+ * @param name the attribute name
+ * @return the attribute value
+ */
+ public String getAttribute(String name) {
+ Object o = attrs.get(new QName(name));
+ return o != null ? o.toString() : null;
+ }
+
+ /**
+ * Add a column family to the table descriptor
+ * @param family the column family model
+ */
+ public void addColumnFamily(ColumnSchemaModel family) {
+ columns.add(family);
+ }
+
+ /**
+ * Retrieve the column family at the given index from the table descriptor
+ * @param index the index
+ * @return the column family model
+ */
+ public ColumnSchemaModel getColumnFamily(int index) {
+ return columns.get(index);
+ }
+
+ /**
+ * @return the table name
+ */
+ @XmlAttribute
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * @return the map for holding unspecified (user) attributes
+ */
+ @XmlAnyAttribute
+ public Map<QName,Object> getAny() {
+ return attrs;
+ }
+
+ /**
+ * @return the columns
+ */
+ @XmlElement(name="ColumnSchema")
+ public List<ColumnSchemaModel> getColumns() {
+ return columns;
+ }
+
+ /**
+ * @param name the table name
+ */
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /**
+ * @param columns the columns to set
+ */
+ public void setColumns(List<ColumnSchemaModel> columns) {
+ this.columns = columns;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("{ NAME=> '");
+ sb.append(name);
+ sb.append('\'');
+ for (Map.Entry<QName,Object> e: attrs.entrySet()) {
+ sb.append(", ");
+ sb.append(e.getKey().getLocalPart());
+ sb.append(" => '");
+ sb.append(e.getValue().toString());
+ sb.append('\'');
+ }
+ sb.append(", COLUMNS => [ ");
+ Iterator<ColumnSchemaModel> i = columns.iterator();
+ while (i.hasNext()) {
+ ColumnSchemaModel family = i.next();
+ sb.append(family.toString());
+ if (i.hasNext()) {
+ sb.append(',');
+ }
+ sb.append(' ');
+ }
+ sb.append("] }");
+ return sb.toString();
+ }
+
+ // getters and setters for common schema attributes
+
+ // cannot be standard bean type getters and setters, otherwise this would
+ // confuse JAXB
+
+ /**
+ * @return true if IS_META attribute exists and is true
+ */
+ public boolean __getIsMeta() {
+ Object o = attrs.get(IS_META);
+ return o != null ? Boolean.valueOf(o.toString()) : false;
+ }
+
+ /**
+ * @return true if IS_ROOT attribute exists and is true
+ */
+ public boolean __getIsRoot() {
+ Object o = attrs.get(IS_ROOT);
+ return o != null ? Boolean.valueOf(o.toString()) : false;
+ }
+
+ /**
+ * @return true if READONLY attribute exists and is true
+ */
+ public boolean __getReadOnly() {
+ Object o = attrs.get(READONLY);
+ return o != null ?
+ Boolean.valueOf(o.toString()) : HTableDescriptor.DEFAULT_READONLY;
+ }
+
+ /**
+ * @param value desired value of IS_META attribute
+ */
+ public void __setIsMeta(boolean value) {
+ attrs.put(IS_META, Boolean.toString(value));
+ }
+
+ /**
+ * @param value desired value of IS_ROOT attribute
+ */
+ public void __setIsRoot(boolean value) {
+ attrs.put(IS_ROOT, Boolean.toString(value));
+ }
+
+ /**
+ * @param value desired value of READONLY attribute
+ */
+ public void __setReadOnly(boolean value) {
+ attrs.put(READONLY, Boolean.toString(value));
+ }
+
+ @Override
+ public byte[] createProtobufOutput() {
+ TableSchema.Builder builder = TableSchema.newBuilder();
+ builder.setName(name);
+ for (Map.Entry<QName,Object> e: attrs.entrySet()) {
+ TableSchema.Attribute.Builder attrBuilder =
+ TableSchema.Attribute.newBuilder();
+ attrBuilder.setName(e.getKey().getLocalPart());
+ attrBuilder.setValue(e.getValue().toString());
+ builder.addAttrs(attrBuilder);
+ }
+ for (ColumnSchemaModel family: columns) {
+ Map<QName,Object> familyAttrs = family.getAny();
+ ColumnSchema.Builder familyBuilder = ColumnSchema.newBuilder();
+ familyBuilder.setName(family.getName());
+ for (Map.Entry<QName,Object> e: familyAttrs.entrySet()) {
+ ColumnSchema.Attribute.Builder attrBuilder =
+ ColumnSchema.Attribute.newBuilder();
+ attrBuilder.setName(e.getKey().getLocalPart());
+ attrBuilder.setValue(e.getValue().toString());
+ familyBuilder.addAttrs(attrBuilder);
+ }
+ if (familyAttrs.containsKey(TTL)) {
+ familyBuilder.setTtl(
+ Integer.valueOf(familyAttrs.get(TTL).toString()));
+ }
+ if (familyAttrs.containsKey(VERSIONS)) {
+ familyBuilder.setMaxVersions(
+ Integer.valueOf(familyAttrs.get(VERSIONS).toString()));
+ }
+ if (familyAttrs.containsKey(COMPRESSION)) {
+ familyBuilder.setCompression(familyAttrs.get(COMPRESSION).toString());
+ }
+ builder.addColumns(familyBuilder);
+ }
+ if (attrs.containsKey(READONLY)) {
+ builder.setReadOnly(
+ Boolean.valueOf(attrs.get(READONLY).toString()));
+ }
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ TableSchema.Builder builder = TableSchema.newBuilder();
+ builder.mergeFrom(message);
+ this.setName(builder.getName());
+ for (TableSchema.Attribute attr: builder.getAttrsList()) {
+ this.addAttribute(attr.getName(), attr.getValue());
+ }
+ if (builder.hasReadOnly()) {
+ this.addAttribute(HTableDescriptor.READONLY, builder.getReadOnly());
+ }
+ for (ColumnSchema family: builder.getColumnsList()) {
+ ColumnSchemaModel familyModel = new ColumnSchemaModel();
+ familyModel.setName(family.getName());
+ for (ColumnSchema.Attribute attr: family.getAttrsList()) {
+ familyModel.addAttribute(attr.getName(), attr.getValue());
+ }
+ if (family.hasTtl()) {
+ familyModel.addAttribute(HColumnDescriptor.TTL, family.getTtl());
+ }
+ if (family.hasMaxVersions()) {
+ familyModel.addAttribute(HConstants.VERSIONS,
+ family.getMaxVersions());
+ }
+ if (family.hasCompression()) {
+ familyModel.addAttribute(HColumnDescriptor.COMPRESSION,
+ family.getCompression());
+ }
+ this.addColumnFamily(familyModel);
+ }
+ return this;
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/VersionModel.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/VersionModel.java
new file mode 100644
index 0000000..98457c7
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/model/VersionModel.java
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+import javax.servlet.ServletContext;
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
+import org.apache.hadoop.hbase.stargate.RESTServlet;
+import org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version;
+
+import com.sun.jersey.spi.container.servlet.ServletContainer;
+
+/**
+ * A representation of the collection of versions of the Stargate software
+ * components.
+ * <ul>
+ * <li>stargateVersion: Stargate revision</li>
+ * <li>jvmVersion: the JVM vendor and version information</li>
+ * <li>osVersion: the OS type, version, and hardware architecture</li>
+ * <li>serverVersion: the name and version of the servlet container</li>
+ * <li>jerseyVersion: the version of the embedded Jersey framework</li>
+ * </ul>
+ *
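+ * <p>
+ * A minimal sketch of the JAXB representation implied by the annotations
+ * below (attribute values are illustrative only):
+ * <pre>
+ * &lt;Version Stargate="..." JVM="..." OS="..." Server="..." Jersey="..."/&gt;
+ * </pre>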
+ */
+@XmlRootElement(name="Version")
+public class VersionModel implements Serializable, ProtobufMessageHandler {
+
+ private static final long serialVersionUID = 1L;
+
+ private String stargateVersion;
+ private String jvmVersion;
+ private String osVersion;
+ private String serverVersion;
+ private String jerseyVersion;
+
+ /**
+ * Default constructor. Do not use.
+ */
+ public VersionModel() {}
+
+ /**
+ * Constructor
+ * @param context the servlet context
+ */
+ public VersionModel(ServletContext context) {
+ stargateVersion = RESTServlet.VERSION_STRING;
+ jvmVersion = System.getProperty("java.vm.vendor") + ' ' +
+ System.getProperty("java.version") + '-' +
+ System.getProperty("java.vm.version");
+ osVersion = System.getProperty("os.name") + ' ' +
+ System.getProperty("os.version") + ' ' +
+ System.getProperty("os.arch");
+ serverVersion = context.getServerInfo();
+ jerseyVersion = ServletContainer.class.getPackage()
+ .getImplementationVersion();
+ }
+
+ /**
+ * @return the Stargate version
+ */
+ @XmlAttribute(name="Stargate")
+ public String getStargateVersion() {
+ return stargateVersion;
+ }
+
+ /**
+ * @return the JVM vendor and version
+ */
+ @XmlAttribute(name="JVM")
+ public String getJvmVersion() {
+ return jvmVersion;
+ }
+
+ /**
+ * @return the OS name, version, and hardware architecture
+ */
+ @XmlAttribute(name="OS")
+ public String getOsVersion() {
+ return osVersion;
+ }
+
+ /**
+ * @return the servlet container version
+ */
+ @XmlAttribute(name="Server")
+ public String getServerVersion() {
+ return serverVersion;
+ }
+
+ /**
+ * @return the version of the embedded Jersey framework
+ */
+ @XmlAttribute(name="Jersey")
+ public String getJerseyVersion() {
+ return jerseyVersion;
+ }
+
+ /**
+ * @param version the Stargate version string
+ */
+ public void setStargateVersion(String version) {
+ this.stargateVersion = version;
+ }
+
+ /**
+ * @param version the OS version string
+ */
+ public void setOsVersion(String version) {
+ this.osVersion = version;
+ }
+
+ /**
+ * @param version the JVM version string
+ */
+ public void setJvmVersion(String version) {
+ this.jvmVersion = version;
+ }
+
+ /**
+ * @param version the servlet container version string
+ */
+ public void setServerVersion(String version) {
+ this.serverVersion = version;
+ }
+
+ /**
+ * @param version the Jersey framework version string
+ */
+ public void setJerseyVersion(String version) {
+ this.jerseyVersion = version;
+ }
+
+ /* (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("Stargate ");
+ sb.append(stargateVersion);
+ sb.append(" [JVM: ");
+ sb.append(jvmVersion);
+ sb.append("] [OS: ");
+ sb.append(osVersion);
+ sb.append("] [Server: ");
+ sb.append(serverVersion);
+ sb.append("] [Jersey: ");
+ sb.append(jerseyVersion);
+ sb.append("]\n");
+ return sb.toString();
+ }
+
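+ // Protobuf round trip: createProtobufOutput() serializes this model into the
+ // generated Version message, and getObjectFromMessage() merges wire bytes back
+ // into this instance. A minimal usage sketch (variable names illustrative):
+ //   byte[] wire = new VersionModel(servletContext).createProtobufOutput();
+ //   VersionModel copy = (VersionModel) new VersionModel().getObjectFromMessage(wire);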
+ @Override
+ public byte[] createProtobufOutput() {
+ Version.Builder builder = Version.newBuilder();
+ builder.setStargateVersion(stargateVersion);
+ builder.setJvmVersion(jvmVersion);
+ builder.setOsVersion(osVersion);
+ builder.setServerVersion(serverVersion);
+ builder.setJerseyVersion(jerseyVersion);
+ return builder.build().toByteArray();
+ }
+
+ @Override
+ public ProtobufMessageHandler getObjectFromMessage(byte[] message)
+ throws IOException {
+ Version.Builder builder = Version.newBuilder();
+ builder.mergeFrom(message);
+ if (builder.hasStargateVersion()) {
+ stargateVersion = builder.getStargateVersion();
+ }
+ if (builder.hasJvmVersion()) {
+ jvmVersion = builder.getJvmVersion();
+ }
+ if (builder.hasOsVersion()) {
+ osVersion = builder.getOsVersion();
+ }
+ if (builder.hasServerVersion()) {
+ serverVersion = builder.getServerVersion();
+ }
+ if (builder.hasJerseyVersion()) {
+ jerseyVersion = builder.getJerseyVersion();
+ }
+ return this;
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/CellMessage.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/CellMessage.java
new file mode 100644
index 0000000..3206198
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/CellMessage.java
@@ -0,0 +1,465 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: CellMessage.proto
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class CellMessage {
+ private CellMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class Cell extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Cell.newBuilder() to construct.
+ private Cell() {
+ initFields();
+ }
+ private Cell(boolean noInit) {}
+
+ private static final Cell defaultInstance;
+ public static Cell getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Cell getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_fieldAccessorTable;
+ }
+
+ // optional bytes row = 1;
+ public static final int ROW_FIELD_NUMBER = 1;
+ private boolean hasRow;
+ private com.google.protobuf.ByteString row_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasRow() { return hasRow; }
+ public com.google.protobuf.ByteString getRow() { return row_; }
+
+ // optional bytes column = 2;
+ public static final int COLUMN_FIELD_NUMBER = 2;
+ private boolean hasColumn;
+ private com.google.protobuf.ByteString column_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasColumn() { return hasColumn; }
+ public com.google.protobuf.ByteString getColumn() { return column_; }
+
+ // optional int64 timestamp = 3;
+ public static final int TIMESTAMP_FIELD_NUMBER = 3;
+ private boolean hasTimestamp;
+ private long timestamp_ = 0L;
+ public boolean hasTimestamp() { return hasTimestamp; }
+ public long getTimestamp() { return timestamp_; }
+
+ // optional bytes data = 4;
+ public static final int DATA_FIELD_NUMBER = 4;
+ private boolean hasData;
+ private com.google.protobuf.ByteString data_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasData() { return hasData; }
+ public com.google.protobuf.ByteString getData() { return data_; }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (hasRow()) {
+ output.writeBytes(1, getRow());
+ }
+ if (hasColumn()) {
+ output.writeBytes(2, getColumn());
+ }
+ if (hasTimestamp()) {
+ output.writeInt64(3, getTimestamp());
+ }
+ if (hasData()) {
+ output.writeBytes(4, getData());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasRow()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getRow());
+ }
+ if (hasColumn()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getColumn());
+ }
+ if (hasTimestamp()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(3, getTimestamp());
+ }
+ if (hasData()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, getData());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.getDefaultInstance()) return this;
+ if (other.hasRow()) {
+ setRow(other.getRow());
+ }
+ if (other.hasColumn()) {
+ setColumn(other.getColumn());
+ }
+ if (other.hasTimestamp()) {
+ setTimestamp(other.getTimestamp());
+ }
+ if (other.hasData()) {
+ setData(other.getData());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setRow(input.readBytes());
+ break;
+ }
+ case 18: {
+ setColumn(input.readBytes());
+ break;
+ }
+ case 24: {
+ setTimestamp(input.readInt64());
+ break;
+ }
+ case 34: {
+ setData(input.readBytes());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // optional bytes row = 1;
+ public boolean hasRow() {
+ return result.hasRow();
+ }
+ public com.google.protobuf.ByteString getRow() {
+ return result.getRow();
+ }
+ public Builder setRow(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasRow = true;
+ result.row_ = value;
+ return this;
+ }
+ public Builder clearRow() {
+ result.hasRow = false;
+ result.row_ = getDefaultInstance().getRow();
+ return this;
+ }
+
+ // optional bytes column = 2;
+ public boolean hasColumn() {
+ return result.hasColumn();
+ }
+ public com.google.protobuf.ByteString getColumn() {
+ return result.getColumn();
+ }
+ public Builder setColumn(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasColumn = true;
+ result.column_ = value;
+ return this;
+ }
+ public Builder clearColumn() {
+ result.hasColumn = false;
+ result.column_ = getDefaultInstance().getColumn();
+ return this;
+ }
+
+ // optional int64 timestamp = 3;
+ public boolean hasTimestamp() {
+ return result.hasTimestamp();
+ }
+ public long getTimestamp() {
+ return result.getTimestamp();
+ }
+ public Builder setTimestamp(long value) {
+ result.hasTimestamp = true;
+ result.timestamp_ = value;
+ return this;
+ }
+ public Builder clearTimestamp() {
+ result.hasTimestamp = false;
+ result.timestamp_ = 0L;
+ return this;
+ }
+
+ // optional bytes data = 4;
+ public boolean hasData() {
+ return result.hasData();
+ }
+ public com.google.protobuf.ByteString getData() {
+ return result.getData();
+ }
+ public Builder setData(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasData = true;
+ result.data_ = value;
+ return this;
+ }
+ public Builder clearData() {
+ result.hasData = false;
+ result.data_ = getDefaultInstance().getData();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.Cell)
+ }
+
+ static {
+ defaultInstance = new Cell(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.Cell)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\021CellMessage.proto\0223org.apache.hadoop.h" +
+ "base.stargate.protobuf.generated\"D\n\004Cell" +
+ "\022\013\n\003row\030\001 \001(\014\022\016\n\006column\030\002 \001(\014\022\021\n\ttimesta" +
+ "mp\030\003 \001(\003\022\014\n\004data\030\004 \001(\014"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Cell_descriptor,
+ new java.lang.String[] { "Row", "Column", "Timestamp", "Data", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ public static void internalForceInit() {}
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/CellSetMessage.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/CellSetMessage.java
new file mode 100644
index 0000000..0981ba6
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/CellSetMessage.java
@@ -0,0 +1,781 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: CellSetMessage.proto
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class CellSetMessage {
+ private CellSetMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class CellSet extends
+ com.google.protobuf.GeneratedMessage {
+ // Use CellSet.newBuilder() to construct.
+ private CellSet() {
+ initFields();
+ }
+ private CellSet(boolean noInit) {}
+
+ private static final CellSet defaultInstance;
+ public static CellSet getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CellSet getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_fieldAccessorTable;
+ }
+
+ public static final class Row extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Row.newBuilder() to construct.
+ private Row() {
+ initFields();
+ }
+ private Row(boolean noInit) {}
+
+ private static final Row defaultInstance;
+ public static Row getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Row getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_fieldAccessorTable;
+ }
+
+ // required bytes key = 1;
+ public static final int KEY_FIELD_NUMBER = 1;
+ private boolean hasKey;
+ private com.google.protobuf.ByteString key_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasKey() { return hasKey; }
+ public com.google.protobuf.ByteString getKey() { return key_; }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.Cell values = 2;
+ public static final int VALUES_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell> values_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell> getValuesList() {
+ return values_;
+ }
+ public int getValuesCount() { return values_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell getValues(int index) {
+ return values_.get(index);
+ }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ if (!hasKey) return false;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (hasKey()) {
+ output.writeBytes(1, getKey());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell element : getValuesList()) {
+ output.writeMessage(2, element);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasKey()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getKey());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell element : getValuesList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, element);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ if (result.values_ != java.util.Collections.EMPTY_LIST) {
+ result.values_ =
+ java.util.Collections.unmodifiableList(result.values_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.getDefaultInstance()) return this;
+ if (other.hasKey()) {
+ setKey(other.getKey());
+ }
+ if (!other.values_.isEmpty()) {
+ if (result.values_.isEmpty()) {
+ result.values_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell>();
+ }
+ result.values_.addAll(other.values_);
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setKey(input.readBytes());
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addValues(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // required bytes key = 1;
+ public boolean hasKey() {
+ return result.hasKey();
+ }
+ public com.google.protobuf.ByteString getKey() {
+ return result.getKey();
+ }
+ public Builder setKey(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasKey = true;
+ result.key_ = value;
+ return this;
+ }
+ public Builder clearKey() {
+ result.hasKey = false;
+ result.key_ = getDefaultInstance().getKey();
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.Cell values = 2;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell> getValuesList() {
+ return java.util.Collections.unmodifiableList(result.values_);
+ }
+ public int getValuesCount() {
+ return result.getValuesCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell getValues(int index) {
+ return result.getValues(index);
+ }
+ public Builder setValues(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.values_.set(index, value);
+ return this;
+ }
+ public Builder setValues(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.Builder builderForValue) {
+ result.values_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addValues(org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.values_.isEmpty()) {
+ result.values_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell>();
+ }
+ result.values_.add(value);
+ return this;
+ }
+ public Builder addValues(org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell.Builder builderForValue) {
+ if (result.values_.isEmpty()) {
+ result.values_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell>();
+ }
+ result.values_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllValues(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell> values) {
+ if (result.values_.isEmpty()) {
+ result.values_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell>();
+ }
+ super.addAll(values, result.values_);
+ return this;
+ }
+ public Builder clearValues() {
+ result.values_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.CellSet.Row)
+ }
+
+ static {
+ defaultInstance = new Row(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.CellSet.Row)
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.CellSet.Row rows = 1;
+ public static final int ROWS_FIELD_NUMBER = 1;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row> rows_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row> getRowsList() {
+ return rows_;
+ }
+ public int getRowsCount() { return rows_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row getRows(int index) {
+ return rows_.get(index);
+ }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row element : getRowsList()) {
+ if (!element.isInitialized()) return false;
+ }
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row element : getRowsList()) {
+ output.writeMessage(1, element);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row element : getRowsList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, element);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ if (result.rows_ != java.util.Collections.EMPTY_LIST) {
+ result.rows_ =
+ java.util.Collections.unmodifiableList(result.rows_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.getDefaultInstance()) return this;
+ if (!other.rows_.isEmpty()) {
+ if (result.rows_.isEmpty()) {
+ result.rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row>();
+ }
+ result.rows_.addAll(other.rows_);
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addRows(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.CellSet.Row rows = 1;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row> getRowsList() {
+ return java.util.Collections.unmodifiableList(result.rows_);
+ }
+ public int getRowsCount() {
+ return result.getRowsCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row getRows(int index) {
+ return result.getRows(index);
+ }
+ public Builder setRows(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.rows_.set(index, value);
+ return this;
+ }
+ public Builder setRows(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.Builder builderForValue) {
+ result.rows_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addRows(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.rows_.isEmpty()) {
+ result.rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row>();
+ }
+ result.rows_.add(value);
+ return this;
+ }
+ public Builder addRows(org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.Builder builderForValue) {
+ if (result.rows_.isEmpty()) {
+ result.rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row>();
+ }
+ result.rows_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllRows(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row> values) {
+ if (result.rows_.isEmpty()) {
+ result.rows_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row>();
+ }
+ super.addAll(values, result.rows_);
+ return this;
+ }
+ public Builder clearRows() {
+ result.rows_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.CellSet)
+ }
+
+ static {
+ defaultInstance = new CellSet(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.CellSet)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\024CellSetMessage.proto\0223org.apache.hadoo" +
+ "p.hbase.stargate.protobuf.generated\032\021Cel" +
+ "lMessage.proto\"\270\001\n\007CellSet\022N\n\004rows\030\001 \003(\013" +
+ "2@.org.apache.hadoop.hbase.stargate.prot" +
+ "obuf.generated.CellSet.Row\032]\n\003Row\022\013\n\003key" +
+ "\030\001 \002(\014\022I\n\006values\030\002 \003(\01329.org.apache.hado" +
+ "op.hbase.stargate.protobuf.generated.Cel" +
+ "l"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor,
+ new java.lang.String[] { "Rows", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Builder.class);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_descriptor =
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_CellSet_Row_descriptor,
+ new java.lang.String[] { "Key", "Values", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet.Row.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.getDescriptor(),
+ }, assigner);
+ }
+
+ public static void internalForceInit() {}
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/ColumnSchemaMessage.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/ColumnSchemaMessage.java
new file mode 100644
index 0000000..7f213d8
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/ColumnSchemaMessage.java
@@ -0,0 +1,899 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: ColumnSchemaMessage.proto
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class ColumnSchemaMessage {
+ private ColumnSchemaMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class ColumnSchema extends
+ com.google.protobuf.GeneratedMessage {
+ // Use ColumnSchema.newBuilder() to construct.
+ private ColumnSchema() {
+ initFields();
+ }
+ private ColumnSchema(boolean noInit) {}
+
+ private static final ColumnSchema defaultInstance;
+ public static ColumnSchema getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public ColumnSchema getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_fieldAccessorTable;
+ }
+
+ public static final class Attribute extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Attribute.newBuilder() to construct.
+ private Attribute() {
+ initFields();
+ }
+ private Attribute(boolean noInit) {}
+
+ private static final Attribute defaultInstance;
+ public static Attribute getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Attribute getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable;
+ }
+
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private java.lang.String name_ = "";
+ public boolean hasName() { return hasName; }
+ public java.lang.String getName() { return name_; }
+
+ // required string value = 2;
+ public static final int VALUE_FIELD_NUMBER = 2;
+ private boolean hasValue;
+ private java.lang.String value_ = "";
+ public boolean hasValue() { return hasValue; }
+ public java.lang.String getValue() { return value_; }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ if (!hasName) return false;
+ if (!hasValue) return false;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (hasName()) {
+ output.writeString(1, getName());
+ }
+ if (hasValue()) {
+ output.writeString(2, getValue());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getName());
+ }
+ if (hasValue()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(2, getValue());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (other.hasValue()) {
+ setValue(other.getValue());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readString());
+ break;
+ }
+ case 18: {
+ setValue(input.readString());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // required string name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public java.lang.String getName() {
+ return result.getName();
+ }
+ public Builder setName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = getDefaultInstance().getName();
+ return this;
+ }
+
+ // required string value = 2;
+ public boolean hasValue() {
+ return result.hasValue();
+ }
+ public java.lang.String getValue() {
+ return result.getValue();
+ }
+ public Builder setValue(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasValue = true;
+ result.value_ = value;
+ return this;
+ }
+ public Builder clearValue() {
+ result.hasValue = false;
+ result.value_ = getDefaultInstance().getValue();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema.Attribute)
+ }
+
+ static {
+ defaultInstance = new Attribute(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema.Attribute)
+ }
+
+ // optional string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private java.lang.String name_ = "";
+ public boolean hasName() { return hasName; }
+ public java.lang.String getName() { return name_; }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema.Attribute attrs = 2;
+ public static final int ATTRS_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> attrs_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> getAttrsList() {
+ return attrs_;
+ }
+ public int getAttrsCount() { return attrs_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getAttrs(int index) {
+ return attrs_.get(index);
+ }
+
+ // optional int32 ttl = 3;
+ public static final int TTL_FIELD_NUMBER = 3;
+ private boolean hasTtl;
+ private int ttl_ = 0;
+ public boolean hasTtl() { return hasTtl; }
+ public int getTtl() { return ttl_; }
+
+ // optional int32 maxVersions = 4;
+ public static final int MAXVERSIONS_FIELD_NUMBER = 4;
+ private boolean hasMaxVersions;
+ private int maxVersions_ = 0;
+ public boolean hasMaxVersions() { return hasMaxVersions; }
+ public int getMaxVersions() { return maxVersions_; }
+
+ // optional string compression = 5;
+ public static final int COMPRESSION_FIELD_NUMBER = 5;
+ private boolean hasCompression;
+ private java.lang.String compression_ = "";
+ public boolean hasCompression() { return hasCompression; }
+ public java.lang.String getCompression() { return compression_; }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute element : getAttrsList()) {
+ if (!element.isInitialized()) return false;
+ }
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (hasName()) {
+ output.writeString(1, getName());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute element : getAttrsList()) {
+ output.writeMessage(2, element);
+ }
+ if (hasTtl()) {
+ output.writeInt32(3, getTtl());
+ }
+ if (hasMaxVersions()) {
+ output.writeInt32(4, getMaxVersions());
+ }
+ if (hasCompression()) {
+ output.writeString(5, getCompression());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getName());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute element : getAttrsList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, element);
+ }
+ if (hasTtl()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(3, getTtl());
+ }
+ if (hasMaxVersions()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(4, getMaxVersions());
+ }
+ if (hasCompression()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(5, getCompression());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ if (result.attrs_ != java.util.Collections.EMPTY_LIST) {
+ result.attrs_ =
+ java.util.Collections.unmodifiableList(result.attrs_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (!other.attrs_.isEmpty()) {
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>();
+ }
+ result.attrs_.addAll(other.attrs_);
+ }
+ if (other.hasTtl()) {
+ setTtl(other.getTtl());
+ }
+ if (other.hasMaxVersions()) {
+ setMaxVersions(other.getMaxVersions());
+ }
+ if (other.hasCompression()) {
+ setCompression(other.getCompression());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readString());
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addAttrs(subBuilder.buildPartial());
+ break;
+ }
+ case 24: {
+ setTtl(input.readInt32());
+ break;
+ }
+ case 32: {
+ setMaxVersions(input.readInt32());
+ break;
+ }
+ case 42: {
+ setCompression(input.readString());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // optional string name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public java.lang.String getName() {
+ return result.getName();
+ }
+ public Builder setName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = getDefaultInstance().getName();
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema.Attribute attrs = 2;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> getAttrsList() {
+ return java.util.Collections.unmodifiableList(result.attrs_);
+ }
+ public int getAttrsCount() {
+ return result.getAttrsCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute getAttrs(int index) {
+ return result.getAttrs(index);
+ }
+ public Builder setAttrs(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.attrs_.set(index, value);
+ return this;
+ }
+ public Builder setAttrs(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder builderForValue) {
+ result.attrs_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addAttrs(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>();
+ }
+ result.attrs_.add(value);
+ return this;
+ }
+ public Builder addAttrs(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder builderForValue) {
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>();
+ }
+ result.attrs_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllAttrs(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute> values) {
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute>();
+ }
+ super.addAll(values, result.attrs_);
+ return this;
+ }
+ public Builder clearAttrs() {
+ result.attrs_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // optional int32 ttl = 3;
+ public boolean hasTtl() {
+ return result.hasTtl();
+ }
+ public int getTtl() {
+ return result.getTtl();
+ }
+ public Builder setTtl(int value) {
+ result.hasTtl = true;
+ result.ttl_ = value;
+ return this;
+ }
+ public Builder clearTtl() {
+ result.hasTtl = false;
+ result.ttl_ = 0;
+ return this;
+ }
+
+ // optional int32 maxVersions = 4;
+ public boolean hasMaxVersions() {
+ return result.hasMaxVersions();
+ }
+ public int getMaxVersions() {
+ return result.getMaxVersions();
+ }
+ public Builder setMaxVersions(int value) {
+ result.hasMaxVersions = true;
+ result.maxVersions_ = value;
+ return this;
+ }
+ public Builder clearMaxVersions() {
+ result.hasMaxVersions = false;
+ result.maxVersions_ = 0;
+ return this;
+ }
+
+ // optional string compression = 5;
+ public boolean hasCompression() {
+ return result.hasCompression();
+ }
+ public java.lang.String getCompression() {
+ return result.getCompression();
+ }
+ public Builder setCompression(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasCompression = true;
+ result.compression_ = value;
+ return this;
+ }
+ public Builder clearCompression() {
+ result.hasCompression = false;
+ result.compression_ = getDefaultInstance().getCompression();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema)
+ }
+
+ static {
+ defaultInstance = new ColumnSchema(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\031ColumnSchemaMessage.proto\0223org.apache." +
+ "hadoop.hbase.stargate.protobuf.generated" +
+ "\"\331\001\n\014ColumnSchema\022\014\n\004name\030\001 \001(\t\022Z\n\005attrs" +
+ "\030\002 \003(\0132K.org.apache.hadoop.hbase.stargat" +
+ "e.protobuf.generated.ColumnSchema.Attrib" +
+ "ute\022\013\n\003ttl\030\003 \001(\005\022\023\n\013maxVersions\030\004 \001(\005\022\023\n" +
+ "\013compression\030\005 \001(\t\032(\n\tAttribute\022\014\n\004name\030" +
+ "\001 \002(\t\022\r\n\005value\030\002 \002(\t"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor,
+ new java.lang.String[] { "Name", "Attrs", "Ttl", "MaxVersions", "Compression", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder.class);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_descriptor =
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_ColumnSchema_Attribute_descriptor,
+ new java.lang.String[] { "Name", "Value", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Attribute.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ public static void internalForceInit() {}
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/ScannerMessage.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/ScannerMessage.java
new file mode 100644
index 0000000..e4365d8
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/ScannerMessage.java
@@ -0,0 +1,662 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: ScannerMessage.proto
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class ScannerMessage {
+ private ScannerMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class Scanner extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Scanner.newBuilder() to construct.
+ private Scanner() {
+ initFields();
+ }
+ private Scanner(boolean noInit) {}
+
+ private static final Scanner defaultInstance;
+ public static Scanner getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Scanner getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_fieldAccessorTable;
+ }
+
+ // optional bytes startRow = 1;
+ public static final int STARTROW_FIELD_NUMBER = 1;
+ private boolean hasStartRow;
+ private com.google.protobuf.ByteString startRow_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasStartRow() { return hasStartRow; }
+ public com.google.protobuf.ByteString getStartRow() { return startRow_; }
+
+ // optional bytes endRow = 2;
+ public static final int ENDROW_FIELD_NUMBER = 2;
+ private boolean hasEndRow;
+ private com.google.protobuf.ByteString endRow_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasEndRow() { return hasEndRow; }
+ public com.google.protobuf.ByteString getEndRow() { return endRow_; }
+
+ // repeated bytes columns = 3;
+ public static final int COLUMNS_FIELD_NUMBER = 3;
+ private java.util.List<com.google.protobuf.ByteString> columns_ =
+ java.util.Collections.emptyList();
+ public java.util.List<com.google.protobuf.ByteString> getColumnsList() {
+ return columns_;
+ }
+ public int getColumnsCount() { return columns_.size(); }
+ public com.google.protobuf.ByteString getColumns(int index) {
+ return columns_.get(index);
+ }
+
+ // optional int32 batch = 4;
+ public static final int BATCH_FIELD_NUMBER = 4;
+ private boolean hasBatch;
+ private int batch_ = 0;
+ public boolean hasBatch() { return hasBatch; }
+ public int getBatch() { return batch_; }
+
+ // optional int64 startTime = 5;
+ public static final int STARTTIME_FIELD_NUMBER = 5;
+ private boolean hasStartTime;
+ private long startTime_ = 0L;
+ public boolean hasStartTime() { return hasStartTime; }
+ public long getStartTime() { return startTime_; }
+
+ // optional int64 endTime = 6;
+ public static final int ENDTIME_FIELD_NUMBER = 6;
+ private boolean hasEndTime;
+ private long endTime_ = 0L;
+ public boolean hasEndTime() { return hasEndTime; }
+ public long getEndTime() { return endTime_; }
+
+ // optional int32 maxVersions = 7;
+ public static final int MAXVERSIONS_FIELD_NUMBER = 7;
+ private boolean hasMaxVersions;
+ private int maxVersions_ = 0;
+ public boolean hasMaxVersions() { return hasMaxVersions; }
+ public int getMaxVersions() { return maxVersions_; }
+
+ // optional string filter = 8;
+ public static final int FILTER_FIELD_NUMBER = 8;
+ private boolean hasFilter;
+ private java.lang.String filter_ = "";
+ public boolean hasFilter() { return hasFilter; }
+ public java.lang.String getFilter() { return filter_; }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (hasStartRow()) {
+ output.writeBytes(1, getStartRow());
+ }
+ if (hasEndRow()) {
+ output.writeBytes(2, getEndRow());
+ }
+ for (com.google.protobuf.ByteString element : getColumnsList()) {
+ output.writeBytes(3, element);
+ }
+ if (hasBatch()) {
+ output.writeInt32(4, getBatch());
+ }
+ if (hasStartTime()) {
+ output.writeInt64(5, getStartTime());
+ }
+ if (hasEndTime()) {
+ output.writeInt64(6, getEndTime());
+ }
+ if (hasMaxVersions()) {
+ output.writeInt32(7, getMaxVersions());
+ }
+ if (hasFilter()) {
+ output.writeString(8, getFilter());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasStartRow()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getStartRow());
+ }
+ if (hasEndRow()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getEndRow());
+ }
+ {
+ int dataSize = 0;
+ for (com.google.protobuf.ByteString element : getColumnsList()) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeBytesSizeNoTag(element);
+ }
+ size += dataSize;
+ size += 1 * getColumnsList().size();
+ }
+ if (hasBatch()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(4, getBatch());
+ }
+ if (hasStartTime()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(5, getStartTime());
+ }
+ if (hasEndTime()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(6, getEndTime());
+ }
+ if (hasMaxVersions()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(7, getMaxVersions());
+ }
+ if (hasFilter()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(8, getFilter());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ if (result.columns_ != java.util.Collections.EMPTY_LIST) {
+ result.columns_ =
+ java.util.Collections.unmodifiableList(result.columns_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.getDefaultInstance()) return this;
+ if (other.hasStartRow()) {
+ setStartRow(other.getStartRow());
+ }
+ if (other.hasEndRow()) {
+ setEndRow(other.getEndRow());
+ }
+ if (!other.columns_.isEmpty()) {
+ if (result.columns_.isEmpty()) {
+ result.columns_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
+ }
+ result.columns_.addAll(other.columns_);
+ }
+ if (other.hasBatch()) {
+ setBatch(other.getBatch());
+ }
+ if (other.hasStartTime()) {
+ setStartTime(other.getStartTime());
+ }
+ if (other.hasEndTime()) {
+ setEndTime(other.getEndTime());
+ }
+ if (other.hasMaxVersions()) {
+ setMaxVersions(other.getMaxVersions());
+ }
+ if (other.hasFilter()) {
+ setFilter(other.getFilter());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setStartRow(input.readBytes());
+ break;
+ }
+ case 18: {
+ setEndRow(input.readBytes());
+ break;
+ }
+ case 26: {
+ addColumns(input.readBytes());
+ break;
+ }
+ case 32: {
+ setBatch(input.readInt32());
+ break;
+ }
+ case 40: {
+ setStartTime(input.readInt64());
+ break;
+ }
+ case 48: {
+ setEndTime(input.readInt64());
+ break;
+ }
+ case 56: {
+ setMaxVersions(input.readInt32());
+ break;
+ }
+ case 66: {
+ setFilter(input.readString());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // optional bytes startRow = 1;
+ public boolean hasStartRow() {
+ return result.hasStartRow();
+ }
+ public com.google.protobuf.ByteString getStartRow() {
+ return result.getStartRow();
+ }
+ public Builder setStartRow(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasStartRow = true;
+ result.startRow_ = value;
+ return this;
+ }
+ public Builder clearStartRow() {
+ result.hasStartRow = false;
+ result.startRow_ = getDefaultInstance().getStartRow();
+ return this;
+ }
+
+ // optional bytes endRow = 2;
+ public boolean hasEndRow() {
+ return result.hasEndRow();
+ }
+ public com.google.protobuf.ByteString getEndRow() {
+ return result.getEndRow();
+ }
+ public Builder setEndRow(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasEndRow = true;
+ result.endRow_ = value;
+ return this;
+ }
+ public Builder clearEndRow() {
+ result.hasEndRow = false;
+ result.endRow_ = getDefaultInstance().getEndRow();
+ return this;
+ }
+
+ // repeated bytes columns = 3;
+ public java.util.List<com.google.protobuf.ByteString> getColumnsList() {
+ return java.util.Collections.unmodifiableList(result.columns_);
+ }
+ public int getColumnsCount() {
+ return result.getColumnsCount();
+ }
+ public com.google.protobuf.ByteString getColumns(int index) {
+ return result.getColumns(index);
+ }
+ public Builder setColumns(int index, com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.columns_.set(index, value);
+ return this;
+ }
+ public Builder addColumns(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.columns_.isEmpty()) {
+ result.columns_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
+ }
+ result.columns_.add(value);
+ return this;
+ }
+ public Builder addAllColumns(
+ java.lang.Iterable<? extends com.google.protobuf.ByteString> values) {
+ if (result.columns_.isEmpty()) {
+ result.columns_ = new java.util.ArrayList<com.google.protobuf.ByteString>();
+ }
+ super.addAll(values, result.columns_);
+ return this;
+ }
+ public Builder clearColumns() {
+ result.columns_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // optional int32 batch = 4;
+ public boolean hasBatch() {
+ return result.hasBatch();
+ }
+ public int getBatch() {
+ return result.getBatch();
+ }
+ public Builder setBatch(int value) {
+ result.hasBatch = true;
+ result.batch_ = value;
+ return this;
+ }
+ public Builder clearBatch() {
+ result.hasBatch = false;
+ result.batch_ = 0;
+ return this;
+ }
+
+ // optional int64 startTime = 5;
+ public boolean hasStartTime() {
+ return result.hasStartTime();
+ }
+ public long getStartTime() {
+ return result.getStartTime();
+ }
+ public Builder setStartTime(long value) {
+ result.hasStartTime = true;
+ result.startTime_ = value;
+ return this;
+ }
+ public Builder clearStartTime() {
+ result.hasStartTime = false;
+ result.startTime_ = 0L;
+ return this;
+ }
+
+ // optional int64 endTime = 6;
+ public boolean hasEndTime() {
+ return result.hasEndTime();
+ }
+ public long getEndTime() {
+ return result.getEndTime();
+ }
+ public Builder setEndTime(long value) {
+ result.hasEndTime = true;
+ result.endTime_ = value;
+ return this;
+ }
+ public Builder clearEndTime() {
+ result.hasEndTime = false;
+ result.endTime_ = 0L;
+ return this;
+ }
+
+ // optional int32 maxVersions = 7;
+ public boolean hasMaxVersions() {
+ return result.hasMaxVersions();
+ }
+ public int getMaxVersions() {
+ return result.getMaxVersions();
+ }
+ public Builder setMaxVersions(int value) {
+ result.hasMaxVersions = true;
+ result.maxVersions_ = value;
+ return this;
+ }
+ public Builder clearMaxVersions() {
+ result.hasMaxVersions = false;
+ result.maxVersions_ = 0;
+ return this;
+ }
+
+ // optional string filter = 8;
+ public boolean hasFilter() {
+ return result.hasFilter();
+ }
+ public java.lang.String getFilter() {
+ return result.getFilter();
+ }
+ public Builder setFilter(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasFilter = true;
+ result.filter_ = value;
+ return this;
+ }
+ public Builder clearFilter() {
+ result.hasFilter = false;
+ result.filter_ = getDefaultInstance().getFilter();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.Scanner)
+ }
+
+ static {
+ defaultInstance = new Scanner(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.Scanner)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\024ScannerMessage.proto\0223org.apache.hadoo" +
+ "p.hbase.stargate.protobuf.generated\"\224\001\n\007" +
+ "Scanner\022\020\n\010startRow\030\001 \001(\014\022\016\n\006endRow\030\002 \001(" +
+ "\014\022\017\n\007columns\030\003 \003(\014\022\r\n\005batch\030\004 \001(\005\022\021\n\tsta" +
+ "rtTime\030\005 \001(\003\022\017\n\007endTime\030\006 \001(\003\022\023\n\013maxVers" +
+ "ions\030\007 \001(\005\022\016\n\006filter\030\010 \001(\t"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Scanner_descriptor,
+ new java.lang.String[] { "StartRow", "EndRow", "Columns", "Batch", "StartTime", "EndTime", "MaxVersions", "Filter", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ public static void internalForceInit() {}
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/StorageClusterStatusMessage.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/StorageClusterStatusMessage.java
new file mode 100644
index 0000000..a420986
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/StorageClusterStatusMessage.java
@@ -0,0 +1,1638 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: StorageClusterStatusMessage.proto
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class StorageClusterStatusMessage {
+ private StorageClusterStatusMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class StorageClusterStatus extends
+ com.google.protobuf.GeneratedMessage {
+ // Use StorageClusterStatus.newBuilder() to construct.
+ private StorageClusterStatus() {
+ initFields();
+ }
+ private StorageClusterStatus(boolean noInit) {}
+
+ private static final StorageClusterStatus defaultInstance;
+ public static StorageClusterStatus getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public StorageClusterStatus getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_fieldAccessorTable;
+ }
+
+ public static final class Region extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Region.newBuilder() to construct.
+ private Region() {
+ initFields();
+ }
+ private Region(boolean noInit) {}
+
+ private static final Region defaultInstance;
+ public static Region getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Region getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Region_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable;
+ }
+
+ // required bytes name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasName() { return hasName; }
+ public com.google.protobuf.ByteString getName() { return name_; }
+
+ // optional int32 stores = 2;
+ public static final int STORES_FIELD_NUMBER = 2;
+ private boolean hasStores;
+ private int stores_ = 0;
+ public boolean hasStores() { return hasStores; }
+ public int getStores() { return stores_; }
+
+ // optional int32 storefiles = 3;
+ public static final int STOREFILES_FIELD_NUMBER = 3;
+ private boolean hasStorefiles;
+ private int storefiles_ = 0;
+ public boolean hasStorefiles() { return hasStorefiles; }
+ public int getStorefiles() { return storefiles_; }
+
+ // optional int32 storefileSizeMB = 4;
+ public static final int STOREFILESIZEMB_FIELD_NUMBER = 4;
+ private boolean hasStorefileSizeMB;
+ private int storefileSizeMB_ = 0;
+ public boolean hasStorefileSizeMB() { return hasStorefileSizeMB; }
+ public int getStorefileSizeMB() { return storefileSizeMB_; }
+
+ // optional int32 memstoreSizeMB = 5;
+ public static final int MEMSTORESIZEMB_FIELD_NUMBER = 5;
+ private boolean hasMemstoreSizeMB;
+ private int memstoreSizeMB_ = 0;
+ public boolean hasMemstoreSizeMB() { return hasMemstoreSizeMB; }
+ public int getMemstoreSizeMB() { return memstoreSizeMB_; }
+
+ // optional int32 storefileIndexSizeMB = 6;
+ public static final int STOREFILEINDEXSIZEMB_FIELD_NUMBER = 6;
+ private boolean hasStorefileIndexSizeMB;
+ private int storefileIndexSizeMB_ = 0;
+ public boolean hasStorefileIndexSizeMB() { return hasStorefileIndexSizeMB; }
+ public int getStorefileIndexSizeMB() { return storefileIndexSizeMB_; }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ if (!hasName) return false;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (hasName()) {
+ output.writeBytes(1, getName());
+ }
+ if (hasStores()) {
+ output.writeInt32(2, getStores());
+ }
+ if (hasStorefiles()) {
+ output.writeInt32(3, getStorefiles());
+ }
+ if (hasStorefileSizeMB()) {
+ output.writeInt32(4, getStorefileSizeMB());
+ }
+ if (hasMemstoreSizeMB()) {
+ output.writeInt32(5, getMemstoreSizeMB());
+ }
+ if (hasStorefileIndexSizeMB()) {
+ output.writeInt32(6, getStorefileIndexSizeMB());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getName());
+ }
+ if (hasStores()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(2, getStores());
+ }
+ if (hasStorefiles()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(3, getStorefiles());
+ }
+ if (hasStorefileSizeMB()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(4, getStorefileSizeMB());
+ }
+ if (hasMemstoreSizeMB()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(5, getMemstoreSizeMB());
+ }
+ if (hasStorefileIndexSizeMB()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(6, getStorefileIndexSizeMB());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (other.hasStores()) {
+ setStores(other.getStores());
+ }
+ if (other.hasStorefiles()) {
+ setStorefiles(other.getStorefiles());
+ }
+ if (other.hasStorefileSizeMB()) {
+ setStorefileSizeMB(other.getStorefileSizeMB());
+ }
+ if (other.hasMemstoreSizeMB()) {
+ setMemstoreSizeMB(other.getMemstoreSizeMB());
+ }
+ if (other.hasStorefileIndexSizeMB()) {
+ setStorefileIndexSizeMB(other.getStorefileIndexSizeMB());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readBytes());
+ break;
+ }
+ case 16: {
+ setStores(input.readInt32());
+ break;
+ }
+ case 24: {
+ setStorefiles(input.readInt32());
+ break;
+ }
+ case 32: {
+ setStorefileSizeMB(input.readInt32());
+ break;
+ }
+ case 40: {
+ setMemstoreSizeMB(input.readInt32());
+ break;
+ }
+ case 48: {
+ setStorefileIndexSizeMB(input.readInt32());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // required bytes name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public com.google.protobuf.ByteString getName() {
+ return result.getName();
+ }
+ public Builder setName(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = getDefaultInstance().getName();
+ return this;
+ }
+
+ // optional int32 stores = 2;
+ public boolean hasStores() {
+ return result.hasStores();
+ }
+ public int getStores() {
+ return result.getStores();
+ }
+ public Builder setStores(int value) {
+ result.hasStores = true;
+ result.stores_ = value;
+ return this;
+ }
+ public Builder clearStores() {
+ result.hasStores = false;
+ result.stores_ = 0;
+ return this;
+ }
+
+ // optional int32 storefiles = 3;
+ public boolean hasStorefiles() {
+ return result.hasStorefiles();
+ }
+ public int getStorefiles() {
+ return result.getStorefiles();
+ }
+ public Builder setStorefiles(int value) {
+ result.hasStorefiles = true;
+ result.storefiles_ = value;
+ return this;
+ }
+ public Builder clearStorefiles() {
+ result.hasStorefiles = false;
+ result.storefiles_ = 0;
+ return this;
+ }
+
+ // optional int32 storefileSizeMB = 4;
+ public boolean hasStorefileSizeMB() {
+ return result.hasStorefileSizeMB();
+ }
+ public int getStorefileSizeMB() {
+ return result.getStorefileSizeMB();
+ }
+ public Builder setStorefileSizeMB(int value) {
+ result.hasStorefileSizeMB = true;
+ result.storefileSizeMB_ = value;
+ return this;
+ }
+ public Builder clearStorefileSizeMB() {
+ result.hasStorefileSizeMB = false;
+ result.storefileSizeMB_ = 0;
+ return this;
+ }
+
+ // optional int32 memstoreSizeMB = 5;
+ public boolean hasMemstoreSizeMB() {
+ return result.hasMemstoreSizeMB();
+ }
+ public int getMemstoreSizeMB() {
+ return result.getMemstoreSizeMB();
+ }
+ public Builder setMemstoreSizeMB(int value) {
+ result.hasMemstoreSizeMB = true;
+ result.memstoreSizeMB_ = value;
+ return this;
+ }
+ public Builder clearMemstoreSizeMB() {
+ result.hasMemstoreSizeMB = false;
+ result.memstoreSizeMB_ = 0;
+ return this;
+ }
+
+ // optional int32 storefileIndexSizeMB = 6;
+ public boolean hasStorefileIndexSizeMB() {
+ return result.hasStorefileIndexSizeMB();
+ }
+ public int getStorefileIndexSizeMB() {
+ return result.getStorefileIndexSizeMB();
+ }
+ public Builder setStorefileIndexSizeMB(int value) {
+ result.hasStorefileIndexSizeMB = true;
+ result.storefileIndexSizeMB_ = value;
+ return this;
+ }
+ public Builder clearStorefileIndexSizeMB() {
+ result.hasStorefileIndexSizeMB = false;
+ result.storefileIndexSizeMB_ = 0;
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatus.Region)
+ }
+
+ static {
+ defaultInstance = new Region(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatus.Region)
+ }
+
+ public static final class Node extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Node.newBuilder() to construct.
+ private Node() {
+ initFields();
+ }
+ private Node(boolean noInit) {}
+
+ private static final Node defaultInstance;
+ public static Node getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Node getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable;
+ }
+
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private java.lang.String name_ = "";
+ public boolean hasName() { return hasName; }
+ public java.lang.String getName() { return name_; }
+
+ // optional int64 startCode = 2;
+ public static final int STARTCODE_FIELD_NUMBER = 2;
+ private boolean hasStartCode;
+ private long startCode_ = 0L;
+ public boolean hasStartCode() { return hasStartCode; }
+ public long getStartCode() { return startCode_; }
+
+ // optional int32 requests = 3;
+ public static final int REQUESTS_FIELD_NUMBER = 3;
+ private boolean hasRequests;
+ private int requests_ = 0;
+ public boolean hasRequests() { return hasRequests; }
+ public int getRequests() { return requests_; }
+
+ // optional int32 heapSizeMB = 4;
+ public static final int HEAPSIZEMB_FIELD_NUMBER = 4;
+ private boolean hasHeapSizeMB;
+ private int heapSizeMB_ = 0;
+ public boolean hasHeapSizeMB() { return hasHeapSizeMB; }
+ public int getHeapSizeMB() { return heapSizeMB_; }
+
+ // optional int32 maxHeapSizeMB = 5;
+ public static final int MAXHEAPSIZEMB_FIELD_NUMBER = 5;
+ private boolean hasMaxHeapSizeMB;
+ private int maxHeapSizeMB_ = 0;
+ public boolean hasMaxHeapSizeMB() { return hasMaxHeapSizeMB; }
+ public int getMaxHeapSizeMB() { return maxHeapSizeMB_; }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatus.Region regions = 6;
+ public static final int REGIONS_FIELD_NUMBER = 6;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> regions_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> getRegionsList() {
+ return regions_;
+ }
+ public int getRegionsCount() { return regions_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index) {
+ return regions_.get(index);
+ }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ if (!hasName) return false;
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region element : getRegionsList()) {
+ if (!element.isInitialized()) return false;
+ }
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (hasName()) {
+ output.writeString(1, getName());
+ }
+ if (hasStartCode()) {
+ output.writeInt64(2, getStartCode());
+ }
+ if (hasRequests()) {
+ output.writeInt32(3, getRequests());
+ }
+ if (hasHeapSizeMB()) {
+ output.writeInt32(4, getHeapSizeMB());
+ }
+ if (hasMaxHeapSizeMB()) {
+ output.writeInt32(5, getMaxHeapSizeMB());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region element : getRegionsList()) {
+ output.writeMessage(6, element);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getName());
+ }
+ if (hasStartCode()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(2, getStartCode());
+ }
+ if (hasRequests()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(3, getRequests());
+ }
+ if (hasHeapSizeMB()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(4, getHeapSizeMB());
+ }
+ if (hasMaxHeapSizeMB()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(5, getMaxHeapSizeMB());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region element : getRegionsList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(6, element);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ if (result.regions_ != java.util.Collections.EMPTY_LIST) {
+ result.regions_ =
+ java.util.Collections.unmodifiableList(result.regions_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (other.hasStartCode()) {
+ setStartCode(other.getStartCode());
+ }
+ if (other.hasRequests()) {
+ setRequests(other.getRequests());
+ }
+ if (other.hasHeapSizeMB()) {
+ setHeapSizeMB(other.getHeapSizeMB());
+ }
+ if (other.hasMaxHeapSizeMB()) {
+ setMaxHeapSizeMB(other.getMaxHeapSizeMB());
+ }
+ if (!other.regions_.isEmpty()) {
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
+ }
+ result.regions_.addAll(other.regions_);
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readString());
+ break;
+ }
+ case 16: {
+ setStartCode(input.readInt64());
+ break;
+ }
+ case 24: {
+ setRequests(input.readInt32());
+ break;
+ }
+ case 32: {
+ setHeapSizeMB(input.readInt32());
+ break;
+ }
+ case 40: {
+ setMaxHeapSizeMB(input.readInt32());
+ break;
+ }
+ case 50: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addRegions(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // required string name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public java.lang.String getName() {
+ return result.getName();
+ }
+ public Builder setName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = getDefaultInstance().getName();
+ return this;
+ }
+
+ // optional int64 startCode = 2;
+ public boolean hasStartCode() {
+ return result.hasStartCode();
+ }
+ public long getStartCode() {
+ return result.getStartCode();
+ }
+ public Builder setStartCode(long value) {
+ result.hasStartCode = true;
+ result.startCode_ = value;
+ return this;
+ }
+ public Builder clearStartCode() {
+ result.hasStartCode = false;
+ result.startCode_ = 0L;
+ return this;
+ }
+
+ // optional int32 requests = 3;
+ public boolean hasRequests() {
+ return result.hasRequests();
+ }
+ public int getRequests() {
+ return result.getRequests();
+ }
+ public Builder setRequests(int value) {
+ result.hasRequests = true;
+ result.requests_ = value;
+ return this;
+ }
+ public Builder clearRequests() {
+ result.hasRequests = false;
+ result.requests_ = 0;
+ return this;
+ }
+
+ // optional int32 heapSizeMB = 4;
+ public boolean hasHeapSizeMB() {
+ return result.hasHeapSizeMB();
+ }
+ public int getHeapSizeMB() {
+ return result.getHeapSizeMB();
+ }
+ public Builder setHeapSizeMB(int value) {
+ result.hasHeapSizeMB = true;
+ result.heapSizeMB_ = value;
+ return this;
+ }
+ public Builder clearHeapSizeMB() {
+ result.hasHeapSizeMB = false;
+ result.heapSizeMB_ = 0;
+ return this;
+ }
+
+ // optional int32 maxHeapSizeMB = 5;
+ public boolean hasMaxHeapSizeMB() {
+ return result.hasMaxHeapSizeMB();
+ }
+ public int getMaxHeapSizeMB() {
+ return result.getMaxHeapSizeMB();
+ }
+ public Builder setMaxHeapSizeMB(int value) {
+ result.hasMaxHeapSizeMB = true;
+ result.maxHeapSizeMB_ = value;
+ return this;
+ }
+ public Builder clearMaxHeapSizeMB() {
+ result.hasMaxHeapSizeMB = false;
+ result.maxHeapSizeMB_ = 0;
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatus.Region regions = 6;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> getRegionsList() {
+ return java.util.Collections.unmodifiableList(result.regions_);
+ }
+ public int getRegionsCount() {
+ return result.getRegionsCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index) {
+ return result.getRegions(index);
+ }
+ public Builder setRegions(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.regions_.set(index, value);
+ return this;
+ }
+ public Builder setRegions(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder builderForValue) {
+ result.regions_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addRegions(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
+ }
+ result.regions_.add(value);
+ return this;
+ }
+ public Builder addRegions(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder builderForValue) {
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
+ }
+ result.regions_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllRegions(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> values) {
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
+ }
+ super.addAll(values, result.regions_);
+ return this;
+ }
+ public Builder clearRegions() {
+ result.regions_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatus.Node)
+ }
+
+ static {
+ defaultInstance = new Node(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatus.Node)
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
+ public static final int LIVENODES_FIELD_NUMBER = 1;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> liveNodes_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> getLiveNodesList() {
+ return liveNodes_;
+ }
+ public int getLiveNodesCount() { return liveNodes_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index) {
+ return liveNodes_.get(index);
+ }
+
+ // repeated string deadNodes = 2;
+ public static final int DEADNODES_FIELD_NUMBER = 2;
+ private java.util.List<java.lang.String> deadNodes_ =
+ java.util.Collections.emptyList();
+ public java.util.List<java.lang.String> getDeadNodesList() {
+ return deadNodes_;
+ }
+ public int getDeadNodesCount() { return deadNodes_.size(); }
+ public java.lang.String getDeadNodes(int index) {
+ return deadNodes_.get(index);
+ }
+
+ // optional int32 regions = 3;
+ public static final int REGIONS_FIELD_NUMBER = 3;
+ private boolean hasRegions;
+ private int regions_ = 0;
+ public boolean hasRegions() { return hasRegions; }
+ public int getRegions() { return regions_; }
+
+ // optional int32 requests = 4;
+ public static final int REQUESTS_FIELD_NUMBER = 4;
+ private boolean hasRequests;
+ private int requests_ = 0;
+ public boolean hasRequests() { return hasRequests; }
+ public int getRequests() { return requests_; }
+
+ // optional double averageLoad = 5;
+ public static final int AVERAGELOAD_FIELD_NUMBER = 5;
+ private boolean hasAverageLoad;
+ private double averageLoad_ = 0D;
+ public boolean hasAverageLoad() { return hasAverageLoad; }
+ public double getAverageLoad() { return averageLoad_; }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node element : getLiveNodesList()) {
+ if (!element.isInitialized()) return false;
+ }
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node element : getLiveNodesList()) {
+ output.writeMessage(1, element);
+ }
+ for (java.lang.String element : getDeadNodesList()) {
+ output.writeString(2, element);
+ }
+ if (hasRegions()) {
+ output.writeInt32(3, getRegions());
+ }
+ if (hasRequests()) {
+ output.writeInt32(4, getRequests());
+ }
+ if (hasAverageLoad()) {
+ output.writeDouble(5, getAverageLoad());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node element : getLiveNodesList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, element);
+ }
+ {
+ int dataSize = 0;
+ for (java.lang.String element : getDeadNodesList()) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeStringSizeNoTag(element);
+ }
+ size += dataSize;
+ size += 1 * getDeadNodesList().size();
+ }
+ if (hasRegions()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(3, getRegions());
+ }
+ if (hasRequests()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(4, getRequests());
+ }
+ if (hasAverageLoad()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeDoubleSize(5, getAverageLoad());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ if (result.liveNodes_ != java.util.Collections.EMPTY_LIST) {
+ result.liveNodes_ =
+ java.util.Collections.unmodifiableList(result.liveNodes_);
+ }
+ if (result.deadNodes_ != java.util.Collections.EMPTY_LIST) {
+ result.deadNodes_ =
+ java.util.Collections.unmodifiableList(result.deadNodes_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.getDefaultInstance()) return this;
+ if (!other.liveNodes_.isEmpty()) {
+ if (result.liveNodes_.isEmpty()) {
+ result.liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
+ }
+ result.liveNodes_.addAll(other.liveNodes_);
+ }
+ if (!other.deadNodes_.isEmpty()) {
+ if (result.deadNodes_.isEmpty()) {
+ result.deadNodes_ = new java.util.ArrayList<java.lang.String>();
+ }
+ result.deadNodes_.addAll(other.deadNodes_);
+ }
+ if (other.hasRegions()) {
+ setRegions(other.getRegions());
+ }
+ if (other.hasRequests()) {
+ setRequests(other.getRequests());
+ }
+ if (other.hasAverageLoad()) {
+ setAverageLoad(other.getAverageLoad());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addLiveNodes(subBuilder.buildPartial());
+ break;
+ }
+ case 18: {
+ addDeadNodes(input.readString());
+ break;
+ }
+ case 24: {
+ setRegions(input.readInt32());
+ break;
+ }
+ case 32: {
+ setRequests(input.readInt32());
+ break;
+ }
+ case 41: {
+ setAverageLoad(input.readDouble());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> getLiveNodesList() {
+ return java.util.Collections.unmodifiableList(result.liveNodes_);
+ }
+ public int getLiveNodesCount() {
+ return result.getLiveNodesCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index) {
+ return result.getLiveNodes(index);
+ }
+ public Builder setLiveNodes(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.liveNodes_.set(index, value);
+ return this;
+ }
+ public Builder setLiveNodes(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder builderForValue) {
+ result.liveNodes_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addLiveNodes(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.liveNodes_.isEmpty()) {
+ result.liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
+ }
+ result.liveNodes_.add(value);
+ return this;
+ }
+ public Builder addLiveNodes(org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder builderForValue) {
+ if (result.liveNodes_.isEmpty()) {
+ result.liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
+ }
+ result.liveNodes_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllLiveNodes(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> values) {
+ if (result.liveNodes_.isEmpty()) {
+ result.liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
+ }
+ super.addAll(values, result.liveNodes_);
+ return this;
+ }
+ public Builder clearLiveNodes() {
+ result.liveNodes_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // repeated string deadNodes = 2;
+ public java.util.List<java.lang.String> getDeadNodesList() {
+ return java.util.Collections.unmodifiableList(result.deadNodes_);
+ }
+ public int getDeadNodesCount() {
+ return result.getDeadNodesCount();
+ }
+ public java.lang.String getDeadNodes(int index) {
+ return result.getDeadNodes(index);
+ }
+ public Builder setDeadNodes(int index, java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.deadNodes_.set(index, value);
+ return this;
+ }
+ public Builder addDeadNodes(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.deadNodes_.isEmpty()) {
+ result.deadNodes_ = new java.util.ArrayList<java.lang.String>();
+ }
+ result.deadNodes_.add(value);
+ return this;
+ }
+ public Builder addAllDeadNodes(
+ java.lang.Iterable<? extends java.lang.String> values) {
+ if (result.deadNodes_.isEmpty()) {
+ result.deadNodes_ = new java.util.ArrayList<java.lang.String>();
+ }
+ super.addAll(values, result.deadNodes_);
+ return this;
+ }
+ public Builder clearDeadNodes() {
+ result.deadNodes_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // optional int32 regions = 3;
+ public boolean hasRegions() {
+ return result.hasRegions();
+ }
+ public int getRegions() {
+ return result.getRegions();
+ }
+ public Builder setRegions(int value) {
+ result.hasRegions = true;
+ result.regions_ = value;
+ return this;
+ }
+ public Builder clearRegions() {
+ result.hasRegions = false;
+ result.regions_ = 0;
+ return this;
+ }
+
+ // optional int32 requests = 4;
+ public boolean hasRequests() {
+ return result.hasRequests();
+ }
+ public int getRequests() {
+ return result.getRequests();
+ }
+ public Builder setRequests(int value) {
+ result.hasRequests = true;
+ result.requests_ = value;
+ return this;
+ }
+ public Builder clearRequests() {
+ result.hasRequests = false;
+ result.requests_ = 0;
+ return this;
+ }
+
+ // optional double averageLoad = 5;
+ public boolean hasAverageLoad() {
+ return result.hasAverageLoad();
+ }
+ public double getAverageLoad() {
+ return result.getAverageLoad();
+ }
+ public Builder setAverageLoad(double value) {
+ result.hasAverageLoad = true;
+ result.averageLoad_ = value;
+ return this;
+ }
+ public Builder clearAverageLoad() {
+ result.hasAverageLoad = false;
+ result.averageLoad_ = 0D;
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatus)
+ }
+
+ static {
+ defaultInstance = new StorageClusterStatus(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatus)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Region_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n!StorageClusterStatusMessage.proto\0223org" +
+ ".apache.hadoop.hbase.stargate.protobuf.g" +
+ "enerated\"\232\004\n\024StorageClusterStatus\022a\n\tliv" +
+ "eNodes\030\001 \003(\0132N.org.apache.hadoop.hbase.s" +
+ "targate.protobuf.generated.StorageCluste" +
+ "rStatus.Node\022\021\n\tdeadNodes\030\002 \003(\t\022\017\n\007regio" +
+ "ns\030\003 \001(\005\022\020\n\010requests\030\004 \001(\005\022\023\n\013averageLoa" +
+ "d\030\005 \001(\001\032\211\001\n\006Region\022\014\n\004name\030\001 \002(\014\022\016\n\006stor" +
+ "es\030\002 \001(\005\022\022\n\nstorefiles\030\003 \001(\005\022\027\n\017storefil" +
+ "eSizeMB\030\004 \001(\005\022\026\n\016memstoreSizeMB\030\005 \001(\005\022\034\n",
+ "\024storefileIndexSizeMB\030\006 \001(\005\032\307\001\n\004Node\022\014\n\004" +
+ "name\030\001 \002(\t\022\021\n\tstartCode\030\002 \001(\003\022\020\n\010request" +
+ "s\030\003 \001(\005\022\022\n\nheapSizeMB\030\004 \001(\005\022\025\n\rmaxHeapSi" +
+ "zeMB\030\005 \001(\005\022a\n\007regions\030\006 \003(\0132P.org.apache" +
+ ".hadoop.hbase.stargate.protobuf.generate" +
+ "d.StorageClusterStatus.Region"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_descriptor,
+ new java.lang.String[] { "LiveNodes", "DeadNodes", "Regions", "Requests", "AverageLoad", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder.class);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Region_descriptor =
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Region_descriptor,
+ new java.lang.String[] { "Name", "Stores", "Storefiles", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder.class);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_descriptor =
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_descriptor.getNestedTypes().get(1);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_StorageClusterStatus_Node_descriptor,
+ new java.lang.String[] { "Name", "StartCode", "Requests", "HeapSizeMB", "MaxHeapSizeMB", "Regions", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ public static void internalForceInit() {}
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
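As a rough usage sketch, assuming the generated StorageClusterStatusMessage classes above are compiled onto the classpath, a status message could be assembled and round-tripped through the protobuf wire format as follows; the host names, region name, and counters are hypothetical values chosen only for illustration:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.stargate.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;

public class StorageClusterStatusExample {
  public static void main(String[] args) throws Exception {
    // Describe one region hosted by one live region server (the name fields are required).
    StorageClusterStatus.Region region = StorageClusterStatus.Region.newBuilder()
        .setName(ByteString.copyFromUtf8("usertable,,1274329955"))
        .setStores(1)
        .setStorefiles(2)
        .setStorefileSizeMB(64)
        .build();
    StorageClusterStatus.Node node = StorageClusterStatus.Node.newBuilder()
        .setName("regionserver1.example.com:60030")
        .setStartCode(1274329955L)
        .setRequests(42)
        .addRegions(region)
        .build();
    StorageClusterStatus status = StorageClusterStatus.newBuilder()
        .addLiveNodes(node)
        .addDeadNodes("regionserver2.example.com:60030")
        .setRegions(1)
        .setRequests(42)
        .setAverageLoad(1.0)
        .build();

    // Serialize to the compact wire format and parse it back.
    byte[] wire = status.toByteArray();
    StorageClusterStatus parsed = StorageClusterStatus.parseFrom(wire);
    System.out.println(parsed.getLiveNodesCount() + " live node(s), "
        + parsed.getDeadNodesCount() + " dead node(s)");
  }
}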
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableInfoMessage.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableInfoMessage.java
new file mode 100644
index 0000000..7a4ef17
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableInfoMessage.java
@@ -0,0 +1,902 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: TableInfoMessage.proto
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class TableInfoMessage {
+ private TableInfoMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class TableInfo extends
+ com.google.protobuf.GeneratedMessage {
+ // Use TableInfo.newBuilder() to construct.
+ private TableInfo() {
+ initFields();
+ }
+ private TableInfo(boolean noInit) {}
+
+ private static final TableInfo defaultInstance;
+ public static TableInfo getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableInfo getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_fieldAccessorTable;
+ }
+
+ public static final class Region extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Region.newBuilder() to construct.
+ private Region() {
+ initFields();
+ }
+ private Region(boolean noInit) {}
+
+ private static final Region defaultInstance;
+ public static Region getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Region getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_fieldAccessorTable;
+ }
+
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private java.lang.String name_ = "";
+ public boolean hasName() { return hasName; }
+ public java.lang.String getName() { return name_; }
+
+ // optional bytes startKey = 2;
+ public static final int STARTKEY_FIELD_NUMBER = 2;
+ private boolean hasStartKey;
+ private com.google.protobuf.ByteString startKey_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasStartKey() { return hasStartKey; }
+ public com.google.protobuf.ByteString getStartKey() { return startKey_; }
+
+ // optional bytes endKey = 3;
+ public static final int ENDKEY_FIELD_NUMBER = 3;
+ private boolean hasEndKey;
+ private com.google.protobuf.ByteString endKey_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasEndKey() { return hasEndKey; }
+ public com.google.protobuf.ByteString getEndKey() { return endKey_; }
+
+ // optional int64 id = 4;
+ public static final int ID_FIELD_NUMBER = 4;
+ private boolean hasId;
+ private long id_ = 0L;
+ public boolean hasId() { return hasId; }
+ public long getId() { return id_; }
+
+ // optional string location = 5;
+ public static final int LOCATION_FIELD_NUMBER = 5;
+ private boolean hasLocation;
+ private java.lang.String location_ = "";
+ public boolean hasLocation() { return hasLocation; }
+ public java.lang.String getLocation() { return location_; }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ if (!hasName) return false;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (hasName()) {
+ output.writeString(1, getName());
+ }
+ if (hasStartKey()) {
+ output.writeBytes(2, getStartKey());
+ }
+ if (hasEndKey()) {
+ output.writeBytes(3, getEndKey());
+ }
+ if (hasId()) {
+ output.writeInt64(4, getId());
+ }
+ if (hasLocation()) {
+ output.writeString(5, getLocation());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getName());
+ }
+ if (hasStartKey()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getStartKey());
+ }
+ if (hasEndKey()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getEndKey());
+ }
+ if (hasId()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(4, getId());
+ }
+ if (hasLocation()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(5, getLocation());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (other.hasStartKey()) {
+ setStartKey(other.getStartKey());
+ }
+ if (other.hasEndKey()) {
+ setEndKey(other.getEndKey());
+ }
+ if (other.hasId()) {
+ setId(other.getId());
+ }
+ if (other.hasLocation()) {
+ setLocation(other.getLocation());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readString());
+ break;
+ }
+ case 18: {
+ setStartKey(input.readBytes());
+ break;
+ }
+ case 26: {
+ setEndKey(input.readBytes());
+ break;
+ }
+ case 32: {
+ setId(input.readInt64());
+ break;
+ }
+ case 42: {
+ setLocation(input.readString());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // required string name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public java.lang.String getName() {
+ return result.getName();
+ }
+ public Builder setName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = getDefaultInstance().getName();
+ return this;
+ }
+
+ // optional bytes startKey = 2;
+ public boolean hasStartKey() {
+ return result.hasStartKey();
+ }
+ public com.google.protobuf.ByteString getStartKey() {
+ return result.getStartKey();
+ }
+ public Builder setStartKey(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasStartKey = true;
+ result.startKey_ = value;
+ return this;
+ }
+ public Builder clearStartKey() {
+ result.hasStartKey = false;
+ result.startKey_ = getDefaultInstance().getStartKey();
+ return this;
+ }
+
+ // optional bytes endKey = 3;
+ public boolean hasEndKey() {
+ return result.hasEndKey();
+ }
+ public com.google.protobuf.ByteString getEndKey() {
+ return result.getEndKey();
+ }
+ public Builder setEndKey(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasEndKey = true;
+ result.endKey_ = value;
+ return this;
+ }
+ public Builder clearEndKey() {
+ result.hasEndKey = false;
+ result.endKey_ = getDefaultInstance().getEndKey();
+ return this;
+ }
+
+ // optional int64 id = 4;
+ public boolean hasId() {
+ return result.hasId();
+ }
+ public long getId() {
+ return result.getId();
+ }
+ public Builder setId(long value) {
+ result.hasId = true;
+ result.id_ = value;
+ return this;
+ }
+ public Builder clearId() {
+ result.hasId = false;
+ result.id_ = 0L;
+ return this;
+ }
+
+ // optional string location = 5;
+ public boolean hasLocation() {
+ return result.hasLocation();
+ }
+ public java.lang.String getLocation() {
+ return result.getLocation();
+ }
+ public Builder setLocation(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasLocation = true;
+ result.location_ = value;
+ return this;
+ }
+ public Builder clearLocation() {
+ result.hasLocation = false;
+ result.location_ = getDefaultInstance().getLocation();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfo.Region)
+ }
+
+ static {
+ defaultInstance = new Region(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfo.Region)
+ }
+
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private java.lang.String name_ = "";
+ public boolean hasName() { return hasName; }
+ public java.lang.String getName() { return name_; }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfo.Region regions = 2;
+ public static final int REGIONS_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region> regions_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region> getRegionsList() {
+ return regions_;
+ }
+ public int getRegionsCount() { return regions_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region getRegions(int index) {
+ return regions_.get(index);
+ }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ if (!hasName) return false;
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region element : getRegionsList()) {
+ if (!element.isInitialized()) return false;
+ }
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (hasName()) {
+ output.writeString(1, getName());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region element : getRegionsList()) {
+ output.writeMessage(2, element);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getName());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region element : getRegionsList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, element);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ if (result.regions_ != java.util.Collections.EMPTY_LIST) {
+ result.regions_ =
+ java.util.Collections.unmodifiableList(result.regions_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (!other.regions_.isEmpty()) {
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region>();
+ }
+ result.regions_.addAll(other.regions_);
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readString());
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addRegions(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // required string name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public java.lang.String getName() {
+ return result.getName();
+ }
+ public Builder setName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = getDefaultInstance().getName();
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfo.Region regions = 2;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region> getRegionsList() {
+ return java.util.Collections.unmodifiableList(result.regions_);
+ }
+ public int getRegionsCount() {
+ return result.getRegionsCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region getRegions(int index) {
+ return result.getRegions(index);
+ }
+ public Builder setRegions(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.regions_.set(index, value);
+ return this;
+ }
+ public Builder setRegions(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder builderForValue) {
+ result.regions_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addRegions(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList();
+ }
+ result.regions_.add(value);
+ return this;
+ }
+ public Builder addRegions(org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder builderForValue) {
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region>();
+ }
+ result.regions_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllRegions(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region> values) {
+ if (result.regions_.isEmpty()) {
+ result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region>();
+ }
+ super.addAll(values, result.regions_);
+ return this;
+ }
+ public Builder clearRegions() {
+ result.regions_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfo)
+ }
+
+ static {
+ defaultInstance = new TableInfo(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfo)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\026TableInfoMessage.proto\0223org.apache.had" +
+ "oop.hbase.stargate.protobuf.generated\"\311\001" +
+ "\n\tTableInfo\022\014\n\004name\030\001 \002(\t\022V\n\007regions\030\002 \003" +
+ "(\0132E.org.apache.hadoop.hbase.stargate.pr" +
+ "otobuf.generated.TableInfo.Region\032V\n\006Reg" +
+ "ion\022\014\n\004name\030\001 \002(\t\022\020\n\010startKey\030\002 \001(\014\022\016\n\006e" +
+ "ndKey\030\003 \001(\014\022\n\n\002id\030\004 \001(\003\022\020\n\010location\030\005 \001(" +
+ "\t"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor,
+ new java.lang.String[] { "Name", "Regions", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Builder.class);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_descriptor =
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableInfo_Region_descriptor,
+ new java.lang.String[] { "Name", "StartKey", "EndKey", "Id", "Location", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableInfoMessage.TableInfo.Region.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ public static void internalForceInit() {}
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableListMessage.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableListMessage.java
new file mode 100644
index 0000000..9d246aa
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableListMessage.java
@@ -0,0 +1,377 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: TableListMessage.proto
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class TableListMessage {
+ private TableListMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class TableList extends
+ com.google.protobuf.GeneratedMessage {
+ // Use TableList.newBuilder() to construct.
+ private TableList() {
+ initFields();
+ }
+ private TableList(boolean noInit) {}
+
+ private static final TableList defaultInstance;
+ public static TableList getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableList getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_fieldAccessorTable;
+ }
+
+ // repeated string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private java.util.List<java.lang.String> name_ =
+ java.util.Collections.emptyList();
+ public java.util.List<java.lang.String> getNameList() {
+ return name_;
+ }
+ public int getNameCount() { return name_.size(); }
+ public java.lang.String getName(int index) {
+ return name_.get(index);
+ }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (java.lang.String element : getNameList()) {
+ output.writeString(1, element);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ {
+ int dataSize = 0;
+ for (java.lang.String element : getNameList()) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeStringSizeNoTag(element);
+ }
+ size += dataSize;
+ size += 1 * getNameList().size();
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ if (result.name_ != java.util.Collections.EMPTY_LIST) {
+ result.name_ =
+ java.util.Collections.unmodifiableList(result.name_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.getDefaultInstance()) return this;
+ if (!other.name_.isEmpty()) {
+ if (result.name_.isEmpty()) {
+ result.name_ = new java.util.ArrayList<java.lang.String>();
+ }
+ result.name_.addAll(other.name_);
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ addName(input.readString());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // repeated string name = 1;
+ public java.util.List<java.lang.String> getNameList() {
+ return java.util.Collections.unmodifiableList(result.name_);
+ }
+ public int getNameCount() {
+ return result.getNameCount();
+ }
+ public java.lang.String getName(int index) {
+ return result.getName(index);
+ }
+ public Builder setName(int index, java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.name_.set(index, value);
+ return this;
+ }
+ public Builder addName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.name_.isEmpty()) {
+ result.name_ = new java.util.ArrayList<java.lang.String>();
+ }
+ result.name_.add(value);
+ return this;
+ }
+ public Builder addAllName(
+ java.lang.Iterable<? extends java.lang.String> values) {
+ if (result.name_.isEmpty()) {
+ result.name_ = new java.util.ArrayList<java.lang.String>();
+ }
+ super.addAll(values, result.name_);
+ return this;
+ }
+ public Builder clearName() {
+ result.name_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.TableList)
+ }
+
+ static {
+ defaultInstance = new TableList(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.TableList)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\026TableListMessage.proto\0223org.apache.had" +
+ "oop.hbase.stargate.protobuf.generated\"\031\n" +
+ "\tTableList\022\014\n\004name\030\001 \003(\t"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableList_descriptor,
+ new java.lang.String[] { "Name", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableListMessage.TableList.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ public static void internalForceInit() {}
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableSchemaMessage.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableSchemaMessage.java
new file mode 100644
index 0000000..db99a0f
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/TableSchemaMessage.java
@@ -0,0 +1,949 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: TableSchemaMessage.proto
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class TableSchemaMessage {
+ private TableSchemaMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class TableSchema extends
+ com.google.protobuf.GeneratedMessage {
+ // Use TableSchema.newBuilder() to construct.
+ private TableSchema() {
+ initFields();
+ }
+ private TableSchema(boolean noInit) {}
+
+ private static final TableSchema defaultInstance;
+ public static TableSchema getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableSchema getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_fieldAccessorTable;
+ }
+
+ public static final class Attribute extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Attribute.newBuilder() to construct.
+ private Attribute() {
+ initFields();
+ }
+ private Attribute(boolean noInit) {}
+
+ private static final Attribute defaultInstance;
+ public static Attribute getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Attribute getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_fieldAccessorTable;
+ }
+
+ // required string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private java.lang.String name_ = "";
+ public boolean hasName() { return hasName; }
+ public java.lang.String getName() { return name_; }
+
+ // required string value = 2;
+ public static final int VALUE_FIELD_NUMBER = 2;
+ private boolean hasValue;
+ private java.lang.String value_ = "";
+ public boolean hasValue() { return hasValue; }
+ public java.lang.String getValue() { return value_; }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ if (!hasName) return false;
+ if (!hasValue) return false;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (hasName()) {
+ output.writeString(1, getName());
+ }
+ if (hasValue()) {
+ output.writeString(2, getValue());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getName());
+ }
+ if (hasValue()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(2, getValue());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (other.hasValue()) {
+ setValue(other.getValue());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readString());
+ break;
+ }
+ case 18: {
+ setValue(input.readString());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // required string name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public java.lang.String getName() {
+ return result.getName();
+ }
+ public Builder setName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = getDefaultInstance().getName();
+ return this;
+ }
+
+ // required string value = 2;
+ public boolean hasValue() {
+ return result.hasValue();
+ }
+ public java.lang.String getValue() {
+ return result.getValue();
+ }
+ public Builder setValue(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasValue = true;
+ result.value_ = value;
+ return this;
+ }
+ public Builder clearValue() {
+ result.hasValue = false;
+ result.value_ = getDefaultInstance().getValue();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema.Attribute)
+ }
+
+ static {
+ defaultInstance = new Attribute(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema.Attribute)
+ }
+
+ // optional string name = 1;
+ public static final int NAME_FIELD_NUMBER = 1;
+ private boolean hasName;
+ private java.lang.String name_ = "";
+ public boolean hasName() { return hasName; }
+ public java.lang.String getName() { return name_; }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema.Attribute attrs = 2;
+ public static final int ATTRS_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> attrs_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> getAttrsList() {
+ return attrs_;
+ }
+ public int getAttrsCount() { return attrs_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getAttrs(int index) {
+ return attrs_.get(index);
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema columns = 3;
+ public static final int COLUMNS_FIELD_NUMBER = 3;
+ private java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> columns_ =
+ java.util.Collections.emptyList();
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> getColumnsList() {
+ return columns_;
+ }
+ public int getColumnsCount() { return columns_.size(); }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema getColumns(int index) {
+ return columns_.get(index);
+ }
+
+ // optional bool inMemory = 4;
+ public static final int INMEMORY_FIELD_NUMBER = 4;
+ private boolean hasInMemory;
+ private boolean inMemory_ = false;
+ public boolean hasInMemory() { return hasInMemory; }
+ public boolean getInMemory() { return inMemory_; }
+
+ // optional bool readOnly = 5;
+ public static final int READONLY_FIELD_NUMBER = 5;
+ private boolean hasReadOnly;
+ private boolean readOnly_ = false;
+ public boolean hasReadOnly() { return hasReadOnly; }
+ public boolean getReadOnly() { return readOnly_; }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute element : getAttrsList()) {
+ if (!element.isInitialized()) return false;
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema element : getColumnsList()) {
+ if (!element.isInitialized()) return false;
+ }
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (hasName()) {
+ output.writeString(1, getName());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute element : getAttrsList()) {
+ output.writeMessage(2, element);
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema element : getColumnsList()) {
+ output.writeMessage(3, element);
+ }
+ if (hasInMemory()) {
+ output.writeBool(4, getInMemory());
+ }
+ if (hasReadOnly()) {
+ output.writeBool(5, getReadOnly());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasName()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getName());
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute element : getAttrsList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, element);
+ }
+ for (org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema element : getColumnsList()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, element);
+ }
+ if (hasInMemory()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(4, getInMemory());
+ }
+ if (hasReadOnly()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(5, getReadOnly());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ if (result.attrs_ != java.util.Collections.EMPTY_LIST) {
+ result.attrs_ =
+ java.util.Collections.unmodifiableList(result.attrs_);
+ }
+ if (result.columns_ != java.util.Collections.EMPTY_LIST) {
+ result.columns_ =
+ java.util.Collections.unmodifiableList(result.columns_);
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.getDefaultInstance()) return this;
+ if (other.hasName()) {
+ setName(other.getName());
+ }
+ if (!other.attrs_.isEmpty()) {
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
+ }
+ result.attrs_.addAll(other.attrs_);
+ }
+ if (!other.columns_.isEmpty()) {
+ if (result.columns_.isEmpty()) {
+ result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
+ }
+ result.columns_.addAll(other.columns_);
+ }
+ if (other.hasInMemory()) {
+ setInMemory(other.getInMemory());
+ }
+ if (other.hasReadOnly()) {
+ setReadOnly(other.getReadOnly());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setName(input.readString());
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addAttrs(subBuilder.buildPartial());
+ break;
+ }
+ case 26: {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder subBuilder = org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addColumns(subBuilder.buildPartial());
+ break;
+ }
+ case 32: {
+ setInMemory(input.readBool());
+ break;
+ }
+ case 40: {
+ setReadOnly(input.readBool());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // optional string name = 1;
+ public boolean hasName() {
+ return result.hasName();
+ }
+ public java.lang.String getName() {
+ return result.getName();
+ }
+ public Builder setName(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasName = true;
+ result.name_ = value;
+ return this;
+ }
+ public Builder clearName() {
+ result.hasName = false;
+ result.name_ = getDefaultInstance().getName();
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema.Attribute attrs = 2;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> getAttrsList() {
+ return java.util.Collections.unmodifiableList(result.attrs_);
+ }
+ public int getAttrsCount() {
+ return result.getAttrsCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute getAttrs(int index) {
+ return result.getAttrs(index);
+ }
+ public Builder setAttrs(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.attrs_.set(index, value);
+ return this;
+ }
+ public Builder setAttrs(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder builderForValue) {
+ result.attrs_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addAttrs(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
+ }
+ result.attrs_.add(value);
+ return this;
+ }
+ public Builder addAttrs(org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder builderForValue) {
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
+ }
+ result.attrs_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllAttrs(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute> values) {
+ if (result.attrs_.isEmpty()) {
+ result.attrs_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute>();
+ }
+ super.addAll(values, result.attrs_);
+ return this;
+ }
+ public Builder clearAttrs() {
+ result.attrs_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchema columns = 3;
+ public java.util.List<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> getColumnsList() {
+ return java.util.Collections.unmodifiableList(result.columns_);
+ }
+ public int getColumnsCount() {
+ return result.getColumnsCount();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema getColumns(int index) {
+ return result.getColumns(index);
+ }
+ public Builder setColumns(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.columns_.set(index, value);
+ return this;
+ }
+ public Builder setColumns(int index, org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder builderForValue) {
+ result.columns_.set(index, builderForValue.build());
+ return this;
+ }
+ public Builder addColumns(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ if (result.columns_.isEmpty()) {
+ result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
+ }
+ result.columns_.add(value);
+ return this;
+ }
+ public Builder addColumns(org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema.Builder builderForValue) {
+ if (result.columns_.isEmpty()) {
+ result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
+ }
+ result.columns_.add(builderForValue.build());
+ return this;
+ }
+ public Builder addAllColumns(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema> values) {
+ if (result.columns_.isEmpty()) {
+ result.columns_ = new java.util.ArrayList<org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.ColumnSchema>();
+ }
+ super.addAll(values, result.columns_);
+ return this;
+ }
+ public Builder clearColumns() {
+ result.columns_ = java.util.Collections.emptyList();
+ return this;
+ }
+
+ // optional bool inMemory = 4;
+ public boolean hasInMemory() {
+ return result.hasInMemory();
+ }
+ public boolean getInMemory() {
+ return result.getInMemory();
+ }
+ public Builder setInMemory(boolean value) {
+ result.hasInMemory = true;
+ result.inMemory_ = value;
+ return this;
+ }
+ public Builder clearInMemory() {
+ result.hasInMemory = false;
+ result.inMemory_ = false;
+ return this;
+ }
+
+ // optional bool readOnly = 5;
+ public boolean hasReadOnly() {
+ return result.hasReadOnly();
+ }
+ public boolean getReadOnly() {
+ return result.getReadOnly();
+ }
+ public Builder setReadOnly(boolean value) {
+ result.hasReadOnly = true;
+ result.readOnly_ = value;
+ return this;
+ }
+ public Builder clearReadOnly() {
+ result.hasReadOnly = false;
+ result.readOnly_ = false;
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema)
+ }
+
+ static {
+ defaultInstance = new TableSchema(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchema)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\030TableSchemaMessage.proto\0223org.apache.h" +
+ "adoop.hbase.stargate.protobuf.generated\032" +
+ "\031ColumnSchemaMessage.proto\"\230\002\n\013TableSche" +
+ "ma\022\014\n\004name\030\001 \001(\t\022Y\n\005attrs\030\002 \003(\0132J.org.ap" +
+ "ache.hadoop.hbase.stargate.protobuf.gene" +
+ "rated.TableSchema.Attribute\022R\n\007columns\030\003" +
+ " \003(\0132A.org.apache.hadoop.hbase.stargate." +
+ "protobuf.generated.ColumnSchema\022\020\n\010inMem" +
+ "ory\030\004 \001(\010\022\020\n\010readOnly\030\005 \001(\010\032(\n\tAttribute" +
+ "\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor,
+ new java.lang.String[] { "Name", "Attrs", "Columns", "InMemory", "ReadOnly", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Builder.class);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor =
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_descriptor.getNestedTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_TableSchema_Attribute_descriptor,
+ new java.lang.String[] { "Name", "Value", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.TableSchemaMessage.TableSchema.Attribute.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.stargate.protobuf.generated.ColumnSchemaMessage.getDescriptor(),
+ }, assigner);
+ }
+
+ public static void internalForceInit() {}
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/VersionMessage.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/VersionMessage.java
new file mode 100644
index 0000000..54b7039
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/protobuf/generated/VersionMessage.java
@@ -0,0 +1,511 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: VersionMessage.proto
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+public final class VersionMessage {
+ private VersionMessage() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public static final class Version extends
+ com.google.protobuf.GeneratedMessage {
+ // Use Version.newBuilder() to construct.
+ private Version() {
+ initFields();
+ }
+ private Version(boolean noInit) {}
+
+ private static final Version defaultInstance;
+ public static Version getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public Version getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_fieldAccessorTable;
+ }
+
+ // optional string stargateVersion = 1;
+ public static final int STARGATEVERSION_FIELD_NUMBER = 1;
+ private boolean hasStargateVersion;
+ private java.lang.String stargateVersion_ = "";
+ public boolean hasStargateVersion() { return hasStargateVersion; }
+ public java.lang.String getStargateVersion() { return stargateVersion_; }
+
+ // optional string jvmVersion = 2;
+ public static final int JVMVERSION_FIELD_NUMBER = 2;
+ private boolean hasJvmVersion;
+ private java.lang.String jvmVersion_ = "";
+ public boolean hasJvmVersion() { return hasJvmVersion; }
+ public java.lang.String getJvmVersion() { return jvmVersion_; }
+
+ // optional string osVersion = 3;
+ public static final int OSVERSION_FIELD_NUMBER = 3;
+ private boolean hasOsVersion;
+ private java.lang.String osVersion_ = "";
+ public boolean hasOsVersion() { return hasOsVersion; }
+ public java.lang.String getOsVersion() { return osVersion_; }
+
+ // optional string serverVersion = 4;
+ public static final int SERVERVERSION_FIELD_NUMBER = 4;
+ private boolean hasServerVersion;
+ private java.lang.String serverVersion_ = "";
+ public boolean hasServerVersion() { return hasServerVersion; }
+ public java.lang.String getServerVersion() { return serverVersion_; }
+
+ // optional string jerseyVersion = 5;
+ public static final int JERSEYVERSION_FIELD_NUMBER = 5;
+ private boolean hasJerseyVersion;
+ private java.lang.String jerseyVersion_ = "";
+ public boolean hasJerseyVersion() { return hasJerseyVersion; }
+ public java.lang.String getJerseyVersion() { return jerseyVersion_; }
+
+ private void initFields() {
+ }
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (hasStargateVersion()) {
+ output.writeString(1, getStargateVersion());
+ }
+ if (hasJvmVersion()) {
+ output.writeString(2, getJvmVersion());
+ }
+ if (hasOsVersion()) {
+ output.writeString(3, getOsVersion());
+ }
+ if (hasServerVersion()) {
+ output.writeString(4, getServerVersion());
+ }
+ if (hasJerseyVersion()) {
+ output.writeString(5, getJerseyVersion());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (hasStargateVersion()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(1, getStargateVersion());
+ }
+ if (hasJvmVersion()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(2, getJvmVersion());
+ }
+ if (hasOsVersion()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(3, getOsVersion());
+ }
+ if (hasServerVersion()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(4, getServerVersion());
+ }
+ if (hasJerseyVersion()) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeStringSize(5, getJerseyVersion());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder> {
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version result;
+
+ // Construct using org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.newBuilder()
+ private Builder() {}
+
+ private static Builder create() {
+ Builder builder = new Builder();
+ builder.result = new org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version();
+ return builder;
+ }
+
+ protected org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version internalGetResult() {
+ return result;
+ }
+
+ public Builder clear() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "Cannot call clear() after build().");
+ }
+ result = new org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(result);
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.getDefaultInstance();
+ }
+
+ public boolean isInitialized() {
+ return result.isInitialized();
+ }
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version build() {
+ if (result != null && !isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return buildPartial();
+ }
+
+ private org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ if (!isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return buildPartial();
+ }
+
+ public org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version buildPartial() {
+ if (result == null) {
+ throw new IllegalStateException(
+ "build() has already been called on this Builder.");
+ }
+ org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version returnMe = result;
+ result = null;
+ return returnMe;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version) {
+ return mergeFrom((org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version other) {
+ if (other == org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.getDefaultInstance()) return this;
+ if (other.hasStargateVersion()) {
+ setStargateVersion(other.getStargateVersion());
+ }
+ if (other.hasJvmVersion()) {
+ setJvmVersion(other.getJvmVersion());
+ }
+ if (other.hasOsVersion()) {
+ setOsVersion(other.getOsVersion());
+ }
+ if (other.hasServerVersion()) {
+ setServerVersion(other.getServerVersion());
+ }
+ if (other.hasJerseyVersion()) {
+ setJerseyVersion(other.getJerseyVersion());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ setStargateVersion(input.readString());
+ break;
+ }
+ case 18: {
+ setJvmVersion(input.readString());
+ break;
+ }
+ case 26: {
+ setOsVersion(input.readString());
+ break;
+ }
+ case 34: {
+ setServerVersion(input.readString());
+ break;
+ }
+ case 42: {
+ setJerseyVersion(input.readString());
+ break;
+ }
+ }
+ }
+ }
+
+
+ // optional string stargateVersion = 1;
+ public boolean hasStargateVersion() {
+ return result.hasStargateVersion();
+ }
+ public java.lang.String getStargateVersion() {
+ return result.getStargateVersion();
+ }
+ public Builder setStargateVersion(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasStargateVersion = true;
+ result.stargateVersion_ = value;
+ return this;
+ }
+ public Builder clearStargateVersion() {
+ result.hasStargateVersion = false;
+ result.stargateVersion_ = getDefaultInstance().getStargateVersion();
+ return this;
+ }
+
+ // optional string jvmVersion = 2;
+ public boolean hasJvmVersion() {
+ return result.hasJvmVersion();
+ }
+ public java.lang.String getJvmVersion() {
+ return result.getJvmVersion();
+ }
+ public Builder setJvmVersion(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasJvmVersion = true;
+ result.jvmVersion_ = value;
+ return this;
+ }
+ public Builder clearJvmVersion() {
+ result.hasJvmVersion = false;
+ result.jvmVersion_ = getDefaultInstance().getJvmVersion();
+ return this;
+ }
+
+ // optional string osVersion = 3;
+ public boolean hasOsVersion() {
+ return result.hasOsVersion();
+ }
+ public java.lang.String getOsVersion() {
+ return result.getOsVersion();
+ }
+ public Builder setOsVersion(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasOsVersion = true;
+ result.osVersion_ = value;
+ return this;
+ }
+ public Builder clearOsVersion() {
+ result.hasOsVersion = false;
+ result.osVersion_ = getDefaultInstance().getOsVersion();
+ return this;
+ }
+
+ // optional string serverVersion = 4;
+ public boolean hasServerVersion() {
+ return result.hasServerVersion();
+ }
+ public java.lang.String getServerVersion() {
+ return result.getServerVersion();
+ }
+ public Builder setServerVersion(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasServerVersion = true;
+ result.serverVersion_ = value;
+ return this;
+ }
+ public Builder clearServerVersion() {
+ result.hasServerVersion = false;
+ result.serverVersion_ = getDefaultInstance().getServerVersion();
+ return this;
+ }
+
+ // optional string jerseyVersion = 5;
+ public boolean hasJerseyVersion() {
+ return result.hasJerseyVersion();
+ }
+ public java.lang.String getJerseyVersion() {
+ return result.getJerseyVersion();
+ }
+ public Builder setJerseyVersion(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ result.hasJerseyVersion = true;
+ result.jerseyVersion_ = value;
+ return this;
+ }
+ public Builder clearJerseyVersion() {
+ result.hasJerseyVersion = false;
+ result.jerseyVersion_ = getDefaultInstance().getJerseyVersion();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.Version)
+ }
+
+ static {
+ defaultInstance = new Version(true);
+ org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.internalForceInit();
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.stargate.protobuf.generated.Version)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\024VersionMessage.proto\0223org.apache.hadoo" +
+ "p.hbase.stargate.protobuf.generated\"w\n\007V" +
+ "ersion\022\027\n\017stargateVersion\030\001 \001(\t\022\022\n\njvmVe" +
+ "rsion\030\002 \001(\t\022\021\n\tosVersion\030\003 \001(\t\022\025\n\rserver" +
+ "Version\030\004 \001(\t\022\025\n\rjerseyVersion\030\005 \001(\t"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_org_apache_hadoop_hbase_stargate_protobuf_generated_Version_descriptor,
+ new java.lang.String[] { "StargateVersion", "JvmVersion", "OsVersion", "ServerVersion", "JerseyVersion", },
+ org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.class,
+ org.apache.hadoop.hbase.stargate.protobuf.generated.VersionMessage.Version.Builder.class);
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ public static void internalForceInit() {}
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java
new file mode 100644
index 0000000..0f3bff6
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/JAXBContextResolver.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.provider;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import javax.ws.rs.ext.ContextResolver;
+import javax.ws.rs.ext.Provider;
+import javax.xml.bind.JAXBContext;
+
+import org.apache.hadoop.hbase.stargate.model.CellModel;
+import org.apache.hadoop.hbase.stargate.model.CellSetModel;
+import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
+import org.apache.hadoop.hbase.stargate.model.RowModel;
+import org.apache.hadoop.hbase.stargate.model.ScannerModel;
+import org.apache.hadoop.hbase.stargate.model.StorageClusterStatusModel;
+import org.apache.hadoop.hbase.stargate.model.StorageClusterVersionModel;
+import org.apache.hadoop.hbase.stargate.model.TableInfoModel;
+import org.apache.hadoop.hbase.stargate.model.TableListModel;
+import org.apache.hadoop.hbase.stargate.model.TableModel;
+import org.apache.hadoop.hbase.stargate.model.TableRegionModel;
+import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
+import org.apache.hadoop.hbase.stargate.model.VersionModel;
+
+import com.sun.jersey.api.json.JSONConfiguration;
+import com.sun.jersey.api.json.JSONJAXBContext;
+
+/**
+ * Plumbing for hooking up Jersey's JSON entity body encoding and decoding
+ * support to JAXB. Modify how the context is created (by using e.g. a
+ * different configuration builder) to control how JSON is processed and
+ * created.
+ */
+@Provider
+public class JAXBContextResolver implements ContextResolver<JAXBContext> {
+
+ private final JAXBContext context;
+
+ private final Set<Class<?>> types;
+
+ private final Class<?>[] cTypes = {
+ CellModel.class,
+ CellSetModel.class,
+ ColumnSchemaModel.class,
+ RowModel.class,
+ ScannerModel.class,
+ StorageClusterStatusModel.class,
+ StorageClusterVersionModel.class,
+ TableInfoModel.class,
+ TableListModel.class,
+ TableModel.class,
+ TableRegionModel.class,
+ TableSchemaModel.class,
+ VersionModel.class
+ };
+
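+ // The same JAXB model classes back both the XML and JSON representations;
+ // JSONConfiguration.natural() asks Jersey to marshal them as conventional
+ // ("natural") JSON rather than the mapped notation with "@"-prefixed attributes.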
+ @SuppressWarnings("unchecked")
+ public JAXBContextResolver() throws Exception {
+ this.types = new HashSet<Class<?>>(Arrays.asList(cTypes));
+ this.context = new JSONJAXBContext(JSONConfiguration.natural().build(),
+ cTypes);
+ }
+
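+ // Per the ContextResolver contract, returning null for types outside the
+ // model set lets Jersey fall back to its default JAXBContext handling.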
+ @Override
+ public JAXBContext getContext(Class<?> objectType) {
+ return (types.contains(objectType)) ? context : null;
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java
new file mode 100644
index 0000000..7fcdbcf
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/consumer/ProtobufMessageBodyConsumer.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.provider.consumer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyReader;
+import javax.ws.rs.ext.Provider;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.stargate.Constants;
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
+
+/**
+ * Adapter for hooking Jersey content processing dispatch up to handlers that
+ * implement the ProtobufMessageHandler interface, for decoding protobuf input.
+ */
+@Provider
+@Consumes(Constants.MIMETYPE_PROTOBUF)
+public class ProtobufMessageBodyConsumer
+ implements MessageBodyReader<ProtobufMessageHandler> {
+ private static final Log LOG =
+ LogFactory.getLog(ProtobufMessageBodyConsumer.class);
+
+ @Override
+ public boolean isReadable(Class<?> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType) {
+ return ProtobufMessageHandler.class.isAssignableFrom(type);
+ }
+
+ @Override
+ public ProtobufMessageHandler readFrom(Class<ProtobufMessageHandler> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType,
+ MultivaluedMap<String, String> httpHeaders, InputStream inputStream)
+ throws IOException, WebApplicationException {
+ ProtobufMessageHandler obj = null;
+ try {
+ obj = type.newInstance();
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ byte[] buffer = new byte[4096];
+ int read;
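+ // Drain the entire entity body into memory first; the handler decodes the
+ // protobuf message from a complete byte array rather than from the stream.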
+ do {
+ read = inputStream.read(buffer, 0, buffer.length);
+ if (read > 0) {
+ baos.write(buffer, 0, read);
+ }
+ } while (read > 0);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug(getClass() + ": read " + baos.size() + " bytes from " +
+ inputStream);
+ }
+ obj = obj.getObjectFromMessage(baos.toByteArray());
+ } catch (InstantiationException e) {
+ throw new WebApplicationException(e);
+ } catch (IllegalAccessException e) {
+ throw new WebApplicationException(e);
+ }
+ return obj;
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/producer/PlainTextMessageBodyProducer.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/producer/PlainTextMessageBodyProducer.java
new file mode 100644
index 0000000..4dde941
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/producer/PlainTextMessageBodyProducer.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.provider.producer;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+import java.util.Map;
+import java.util.WeakHashMap;
+
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+
+import org.apache.hadoop.hbase.stargate.Constants;
+
+/**
+ * An adapter between Jersey and Object.toString(). Hooks up plain text output
+ * to the Jersey content handling framework.
+ * Jersey will first call getSize() to learn the number of bytes that will be
+ * sent, then writeTo to perform the actual I/O.
+ */
+@Provider
+@Produces(Constants.MIMETYPE_TEXT)
+public class PlainTextMessageBodyProducer
+ implements MessageBodyWriter<Object> {
+
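+ // Jersey calls getSize() before writeTo(), so the rendered bytes are cached
+ // per entity to ensure the reported length matches what is written; the
+ // weak keys let entries be collected if writeTo() is never invoked.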
+ private Map<Object, byte[]> buffer = new WeakHashMap<Object, byte[]>();
+
+ @Override
+ public boolean isWriteable(Class<?> arg0, Type arg1, Annotation[] arg2,
+ MediaType arg3) {
+ return true;
+ }
+
+ @Override
+ public long getSize(Object object, Class<?> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType) {
+ byte[] bytes = object.toString().getBytes();
+ buffer.put(object, bytes);
+ return bytes.length;
+ }
+
+ @Override
+ public void writeTo(Object object, Class<?> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType,
+ MultivaluedMap<String, Object> httpHeaders, OutputStream outStream)
+ throws IOException, WebApplicationException {
+ outStream.write(buffer.remove(object));
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/producer/ProtobufMessageBodyProducer.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/producer/ProtobufMessageBodyProducer.java
new file mode 100644
index 0000000..b3ab55a
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/provider/producer/ProtobufMessageBodyProducer.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.provider.producer;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+import java.util.Map;
+import java.util.WeakHashMap;
+
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyWriter;
+import javax.ws.rs.ext.Provider;
+
+import org.apache.hadoop.hbase.stargate.Constants;
+import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
+
+/**
+ * An adapter between Jersey and ProtobufMessageHandler implementors. Hooks up
+ * protobuf output producing methods to the Jersey content handling framework.
+ * Jersey will first call getSize() to learn the number of bytes that will be
+ * sent, then writeTo to perform the actual I/O.
+ */
+@Provider
+@Produces(Constants.MIMETYPE_PROTOBUF)
+public class ProtobufMessageBodyProducer
+ implements MessageBodyWriter<ProtobufMessageHandler> {
+
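+ // As in PlainTextMessageBodyProducer: getSize() serializes the message once
+ // and caches the bytes, then writeTo() emits and discards them.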
+ private Map<ProtobufMessageHandler, byte[]> buffer = new WeakHashMap<ProtobufMessageHandler, byte[]>();
+
+ @Override
+ public boolean isWriteable(Class<?> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType) {
+ return ProtobufMessageHandler.class.isAssignableFrom(type);
+ }
+
+ @Override
+ public long getSize(ProtobufMessageHandler m, Class<?> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType) {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ try {
+ baos.write(m.createProtobufOutput());
+ } catch (IOException e) {
+ return -1;
+ }
+ byte[] bytes = baos.toByteArray();
+ buffer.put(m, bytes);
+ return bytes.length;
+ }
+
+ @Override
+ public void writeTo(ProtobufMessageHandler m, Class<?> type, Type genericType,
+ Annotation[] annotations, MediaType mediaType,
+ MultivaluedMap<String, Object> httpHeaders, OutputStream entityStream)
+ throws IOException, WebApplicationException {
+ entityStream.write(buffer.remove(m));
+ }
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/HTableTokenBucket.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/HTableTokenBucket.java
new file mode 100644
index 0000000..007cb5d
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/HTableTokenBucket.java
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.util;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.RowLock;
+import org.apache.hadoop.hbase.stargate.Constants;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * A HTable-backed token bucket.
+ *
+ * Can be configured with rate, the number of tokens to add to the
+ * bucket each second, and size, the maximum number of tokens allowed
+ * to burst. Configuration is stored in the HTable adjacent to the token
+ * count and is periodically refreshed.
+ *
+ * Expected columns (all in the "user" family):
+ *
+ *   user:tokens
+ *   user:tokens.rate
+ *   user:tokens.size
+ */
+public class HTableTokenBucket implements Constants {
+
+ static final Log LOG = LogFactory.getLog(HTableTokenBucket.class);
+
+ static final byte[] USER = Bytes.toBytes("user");
+ static final byte[] TOKENS = Bytes.toBytes("tokens");
+ static final byte[] TOKENS_RATE = Bytes.toBytes("tokens.rate");
+ static final byte[] TOKENS_SIZE = Bytes.toBytes("tokens.size");
+
+ Configuration conf;
+ String tableName;
+ HTable table;
+ byte[] row;
+ int tokens;
+ double rate = 10.0; // default, 10 ops added per second
+ int size = 100; // burst
+ long lastUpdated = System.currentTimeMillis();
+ long configUpdateInterval;
+ long lastConfigUpdated = System.currentTimeMillis();
+
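+ // Re-reads the per-user rate and burst size from the table; available()
+ // invokes this periodically so limits can be tuned without a restart.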
+ void updateConfig() throws IOException {
+ Get get = new Get(row);
+ get.addColumn(USER, TOKENS_RATE);
+ get.addColumn(USER, TOKENS_SIZE);
+ Result result = table.get(get);
+ byte[] value = result.getValue(USER, TOKENS_RATE);
+ if (value != null) {
+ this.rate = (int)Bytes.toDouble(value);
+ }
+ value = result.getValue(USER, TOKENS_SIZE);
+ if (value != null) {
+ this.size = (int)Bytes.toLong(value);
+ }
+ }
+
+ /**
+ * Constructor
+ * @param conf configuration
+ * @param row row key for user
+ * @throws IOException
+ */
+ public HTableTokenBucket(Configuration conf, byte[] row)
+ throws IOException {
+ this(conf, conf.get("stargate.tb.htable.name", USERS_TABLE), row);
+ }
+
+ /**
+ * Constructor
+ * @param conf configuration
+ * @param tableName the table to use
+ * @param row row key for user
+ * @throws IOException
+ */
+ public HTableTokenBucket(Configuration conf, String tableName,
+ byte[] row) throws IOException {
+ this.conf = conf;
+ this.tableName = tableName;
+ this.row = row;
+ this.table = new HTable(conf, tableName);
+ this.configUpdateInterval =
+ conf.getLong("stargate.tb.update.interval", 1000 * 60);
+ updateConfig();
+ }
+
+ /**
+ * @return the number of remaining tokens in the bucket (roughly)
+ * @throws IOException
+ */
+ public int available() throws IOException {
+ long now = System.currentTimeMillis();
+ if (now - lastConfigUpdated > configUpdateInterval) {
+ try {
+ updateConfig();
+ } catch (IOException e) {
+ LOG.warn(StringUtils.stringifyException(e));
+ }
+ lastConfigUpdated = now;
+ }
+
+ // We can't simply use incrementColumnValue here because the timestamp of
+ // the keyvalue will not be changed as long as it remains in memstore, so
+ // there will be some unavoidable contention on the row if multiple
+ // Stargate instances are concurrently serving the same user, and three
+ // more round trips than otherwise.
+ RowLock rl = table.lockRow(row);
+ try {
+ Get get = new Get(row, rl);
+ get.addColumn(USER, TOKENS);
+ List<KeyValue> kvs = table.get(get).list();
+ if (kvs != null && !kvs.isEmpty()) {
+ KeyValue kv = kvs.get(0);
+ tokens = (int)Bytes.toLong(kv.getValue());
+ lastUpdated = kv.getTimestamp();
+ } else {
+ tokens = (int)rate;
+ }
+ long elapsed = now - lastUpdated;
+ int i = (int)((elapsed / 1000) * rate); // convert sec <-> ms
+ if (tokens + i > size) {
+ i = size - tokens;
+ }
+ if (i > 0) {
+ tokens += i;
+ Put put = new Put(row, rl);
+ put.add(USER, TOKENS, Bytes.toBytes((long)tokens));
+ put.setWriteToWAL(false);
+ table.put(put);
+ table.flushCommits();
+ }
+ } finally {
+ table.unlockRow(rl);
+ }
+ return tokens;
+ }
+
+ /**
+ * @param t the number of tokens to consume from the bucket
+ * @throws IOException
+ */
+ public void remove(int t) throws IOException {
+ // Here we don't care about timestamp changes; actually it's advantageous
+ // if they are not updated, otherwise available() and remove() must be
+ // used as near to each other in time as possible.
+ table.incrementColumnValue(row, USER, TOKENS, (long) -t, false);
+ }
+
+ public double getRate() {
+ return rate;
+ }
+
+ public int getSize() {
+ return size;
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/SoftUserData.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/SoftUserData.java
new file mode 100644
index 0000000..5b8638e
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/SoftUserData.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.util;
+
+import java.util.Map;
+
+import org.apache.hadoop.hbase.stargate.User;
+import org.apache.hadoop.hbase.util.SoftValueMap;
+
+/**
+ * Provides a softmap backed collection of user data. The collection can be
+ * reclaimed by the garbage collector at any time when under heap pressure.
+ */
+public class SoftUserData extends UserData {
+
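+ // Values are held through soft references, so cached UserData may be
+ // reclaimed under heap pressure; get() transparently rebuilds a fresh entry.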
+ static final Map<User, UserData> map = new SoftValueMap<User, UserData>();
+
+ public static synchronized UserData get(final User user) {
+ UserData data = map.get(user);
+ if (data == null) {
+ data = new UserData();
+ map.put(user, data);
+ }
+ return data;
+ }
+
+ public static synchronized UserData put(final User user,
+ final UserData data) {
+ return map.put(user, data);
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/TokenBucket.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/TokenBucket.java
new file mode 100644
index 0000000..49f7482
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/TokenBucket.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.util;
+
+public class TokenBucket {
+
+ private int tokens;
+ private int rate;
+ private int size;
+ private long lastUpdated;
+
+ /**
+ * Constructor
+ * @param rate limit in units per second
+ * @param size maximum burst size in units
+ */
+ public TokenBucket(int rate, int size) {
+ this.rate = rate;
+ this.tokens = this.size = size;
+ this.lastUpdated = System.currentTimeMillis(); // start with a full bucket as of now
+ }
+
+ /**
+ * @return the number of remaining tokens in the bucket
+ */
+ public int available() {
+ long now = System.currentTimeMillis();
+ long elapsed = now - lastUpdated;
+ lastUpdated = now;
+ // elapsed is in milliseconds and rate is in units per second
+ tokens += (int)(elapsed * rate / 1000);
+ if (tokens > size) {
+ tokens = size;
+ }
+ return tokens;
+ }
+
+ /**
+ * @param t the number of tokens to consume from the bucket
+ */
+ public void remove(int t) {
+ tokens -= t;
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/UserData.java b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/UserData.java
new file mode 100644
index 0000000..6cc3522
--- /dev/null
+++ b/contrib/stargate/core/src/main/java/org/apache/hadoop/hbase/stargate/util/UserData.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.util;
+
+import java.util.ArrayList;
+
+/**
+ * Generic storage for per user information.
+ */
+public class UserData {
+
+ public static final int TOKENBUCKET = 0;
+
+ ArrayList data = new ArrayList();
+
+ public synchronized boolean has(final int sel) {
+ try {
+ return data.get(sel) != null;
+ } catch (IndexOutOfBoundsException e) {
+ return false;
+ }
+ }
+
+ public synchronized Object get(final int sel) {
+ try {
+ return data.get(sel);
+ } catch (IndexOutOfBoundsException e) {
+ return null;
+ }
+ }
+
+ public synchronized Object put(final int sel, final Object o) {
+ Object old = null;
+ try {
+ old = data.get(sel);
+ } catch (IndexOutOfBoundsException e) {
+ // no previous value at this slot
+ }
+ // grow the backing list as needed; ArrayList.set fails on out-of-range indices
+ while (data.size() <= sel) {
+ data.add(null);
+ }
+ data.set(sel, o);
+ return old;
+ }
+
+ public synchronized Object remove(int sel) {
+ return put(sel, null);
+ }
+
+}
diff --git a/contrib/stargate/core/src/main/javadoc/org/apache/hadoop/hbase/stargate/package.html b/contrib/stargate/core/src/main/javadoc/org/apache/hadoop/hbase/stargate/package.html
new file mode 100755
index 0000000..d88521d
--- /dev/null
+++ b/contrib/stargate/core/src/main/javadoc/org/apache/hadoop/hbase/stargate/package.html
@@ -0,0 +1,1576 @@
+
+
+
+
+
+
+
+Stargate
+This package provides "Stargate" -- a RESTful Web service front end for HBase.
+
+
+
+Table Of Contents
+
+Status
+Deployment
+
+ Daemon
+ Servlet
+
+Representational State Transfer
+Resource Identifiers
+Operations
+
+ Query Software Version
+ Query Storage Cluster Version
+ Query Storage Cluster Status
+ Query Table List
+ Query Table Schema
+ Create Table Or Update Table Schema
+ Query Table Metadata
+ Delete Table
+ Cell Query (Single Value)
+ Cell or Row Query (Multiple Values)
+ Cell Store (Single)
+ Cell Store (Multiple)
+ Row, Column, or Cell Delete
+ Scanner Creation
+ Scanner Get Next
+ Scanner Deletion
+
+ XML Schema
+ Protobufs Schema
+
+
+
+
+Status
+
+
+Stargate is alpha quality software. It has received testing under controlled
+conditions and functions correctly given well-formed input. It is bundled with
+a suite of unit tests which form the basis for correctness and regression
+testing, but the coverage of this suite is basic. Stargate has not yet been
+tested under high load conditions, and no comprehensive performance
+measurements have been made.
+
+Performance testing and tuning, and increased test suite coverage (including
+deliberately corrupted inputs) are both on the roadmap toward beta quality.
+
+
+
+Deployment
+
+
+
+
+
+Daemon
+
+
+Stargate can run as a daemon which starts an embedded Jetty servlet container
+and deploys the servlet into it.
+
+
+Place the Stargate jar in either the HBase installation root directory or
+its lib/ directory.
+
+Copy the jars from contrib/stargate/lib/ into the lib/
+directory of the HBase installation.
+
+Start the embedded Jetty servlet container:
+
+
+ In the foreground:
+
+
+ % ./bin/hbase org.apache.hadoop.hbase.stargate.Main -p <port>
+
+
+
+ where <port> is optional, and is the port the connector should
+ listen on. (Default is 8080.)
+
+
+
+ In the background:
+
+
+ % ./bin/hbase-daemon.sh start org.apache.hadoop.hbase.stargate.Main -p <port>
+
+
+
+ where <port> is optional, and is the port the connector should
+ listen on. (Default is 8080.)
+
+
+
+
+
+
+
+Servlet
+
+
+Stargate is also packaged as a Java web application (WAR) which can be
+deployed into any servlet API compliant container, such as Tomcat, Jetty,
+or Glassfish.
+
+
+Copy the HBase jar from the HBase installation root directory and all jars
+from the HBase lib/ and contrib/stargate/lib/ directories to
+somewhere on the servlet container's classpath, into a shared deployment lib
+directory for example.
+
+Drop the Stargate WAR where the container expects it.
+
+Configure the servlet. An example for Jetty follows:
+
+
+web.xml
+
+<!DOCTYPE web-app PUBLIC
+"-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN"
+"http://java.sun.com/dtd/web-app_2_3.dtd">
+
+<web-app>
+ <display-name>
+ HBase Stargate
+ </display-name>
+
+ <servlet>
+ <servlet-name>api</servlet-name>
+ <servlet-class>org.apache.hadoop.hbase.stargate.RESTServlet</servlet-class>
+ <load-on-startup>1</load-on-startup>
+
+ <init-param>
+ <param-name>com.sun.jersey.config.property.packages</param-name>
+ <param-value>org.apache.hadoop.hbase.stargate</param-value>
+ </init-param>
+ <init-param>
+ <param-name>com.sun.jersey.config.property.resourceConfigClass</param-name>
+ <param-value>com.sun.jersey.api.core.PackagesResourceConfig</param-value>
+ </init-param>
+ </servlet>
+
+ <servlet-mapping>
+ <servlet-name>api</servlet-name>
+ <url-pattern>/*</url-pattern>
+ </servlet-mapping>
+</web-app>
+
+
+
+
+jetty.xml
+
+<!DOCTYPE Configure PUBLIC "-//Mort Bay Consulting//DTD Configure//EN"
+ "http://jetty.mortbay.org/configure.dtd">
+
+<Configure id="Server" class="org.mortbay.jetty.Server">
+ <Call name="addConnector">
+ <Arg>
+ <New class="org.mortbay.jetty.nio.SelectChannelConnector">
+ <Set name="port">
+ 8080
+ </Set>
+ </New>
+ </Arg>
+ </Call>
+
+ <Set name="handler">
+ <New id="Handlers" class="org.mortbay.jetty.handler.HandlerCollection">
+ <Array type="org.mortbay.jetty.Handler">
+ <Set name="handlers">
+ <Item>
+ <New id="Contexts" class="org.mortbay.jetty.handler.ContextHandlerCollection"/>
+ </Item>
+ </Set>
+ </Array>
+ </New>
+ </Set>
+
+ <Call name="addLifeCycle">
+ <Arg>
+ <New class="org.mortbay.jetty.deployer.WebAppDeployer">
+ <Set name="contexts">
+ <Ref id="Contexts"/>
+ </Set>
+ <Set name="webAppDir">
+ <SystemProperty name="jetty.home" default="."/>/webapps
+ </Set>
+ </New>
+ </Arg>
+ </Call>
+</Configure>
+
+
+
+
+
+
+
+
+
+Representational State Transfer
+
+
+
+The terms "representational state transfer" and "REST" were introduced in 2000
+in the
+
+doctoral dissertation of Roy Fielding , one of the principal authors of the
+Hypertext Transfer Protocol (HTTP) specification.
+
+A GET to an identifier requests a copy of the information in the supplied
+content type.
+
+A PUT to an identifier replaces the information. The supplied content type
+determines how it is to be interpreted.
+
+POST adds information.
+
+DELETE eliminates information.
+
+
+
+Database Operation      REST/HTTP Equivalent
+------------------      --------------------
+CREATE                  PUT
+READ                    GET
+UPDATE                  POST (update) or PUT (replace)
+DELETE                  DELETE
+
+
+
+
+
+Resource Identifiers
+
+
+RFC 3986 defines URI
+syntax:
+
+
+scheme://user:pass@example.net:8080/path/to/file;type=foo?name=val#frag
+\_____/ \_______/\___________/\__/\______/\____/\______/\________/\___/
+ | | | | | | | | |
+ scheme userinfo hostname port path filename param query fragment
+ \________________________/
+ authority
+
+
+Stargate exposes HBase tables, rows, cells, and metadata as URL specified
+resources.
+
+NOTE: The characters / , : , and , are reserved
+within row keys, column names, and column qualifiers. Clients must escape them,
+typically with URL (percent) encoding. For
+example, the key:
+
+
+ http://www.google.com/
+
+
+should first be encoded as:
+
+
+ http%3A%2F%2Fwww.google.com%2F
+
+
+to produce a path like:
+
+ /SomeTable/http%3A%2F%2Fwww.google.com%2F/someColumn:qualifier
+
+
+
+Addressing for cell or row query (GET)
+
+
+ path := '/' <table>
+ '/' <row>
+ ( '/' ( <column> ( ':' <qualifier> )?
+ ( ',' <column> ( ':' <qualifier> )? )+ )?
+ ( '/' ( <start-timestamp> ',' )? <end-timestamp> )? )?
+ query := ( '?' 'v' '=' <num-versions> )?
+
+
+
+
+Addressing for single value store (PUT)
+
+Address with table, row, column (and optional qualifier), and optional timestamp.
+
+
+ path := '/' <table> '/' <row> '/' <column> ( ':' <qualifier> )?
+ ( '/' <timestamp> )?
+
+
+
+
+Addressing for multiple (batched) value store (PUT)
+
+
+ path := '/' <table> '/' <false-row-key>
+
+
+
+
+Addressing for row, column, or cell DELETE
+
+
+ path := '/' <table>
+ '/' <row>
+ ( '/' <column> ( ':' <qualifier> )?
+ ( '/' <timestamp> )? )?
+
+
+
+
+Addressing for table creation or schema update (PUT or POST), schema query
+(GET), or delete (DELETE)
+
+
+ path := '/' <table> / 'schema'
+
+
+
+
+Addressing for scanner creation (POST)
+
+
+ path := '/' <table> '/' 'scanner'
+
+
+
+
+Addressing for scanner next item (GET)
+
+
+ path := '/' <table> '/' 'scanner' '/' <scanner-id>
+
+
+
+
+Addressing for scanner deletion (DELETE)
+
+
+ path := '/' <table> '/' 'scanner' '/' <scanner-id>
+
+
+
+
+
+Operations
+
+
+
+
+Query Software Version
+
+
+
+GET /version
+
+
+Returns the software version.
+Set Accept header to text/plain for plain text output.
+Set Accept header to text/xml for XML reply.
+Set Accept header to application/json for JSON reply.
+Set Accept header to application/x-protobuf for protobufs.
+
+If not successful, returns appropriate HTTP error status code.
+If successful, returns the software version.
+
+Examples:
+
+
+
+% curl http://localhost:8000/version
+
+HTTP/1.1 200 OK
+Content-Length: 149
+Cache-Control: no-cache
+Content-Type: text/plain
+
+Stargate 0.0.1 [JVM: Sun Microsystems Inc. 1.6.0_13-11.3-b02] [OS: Linux 2.6.
+18-128.1.6.el5.centos.plusxen amd64] [Jetty: 6.1.14] [Jersey: 1.1.0-ea]
+
+% curl -H "Accept: text/xml" http://localhost:8000/version
+
+HTTP/1.1 200 OK
+Cache-Control: no-cache
+Content-Type: text/xml
+Content-Length: 212
+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<Version Stargate="0.0.1" OS="Linux 2.6.18-128.1.6.el5.centos.plusxen amd64"
+ JVM="Sun Microsystems Inc. 1.6.0_13-11.3-b02" Jetty="6.1.14" Jersey="1.1.0-e
+a"/>
+
+% curl -H "Accept: application/json" http://localhost:8000/version
+
+HTTP/1.1 200 OK
+Cache-Control: no-cache
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{"@Stargate":"0.0.1","@OS":"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64","@
+JVM":"Sun Microsystems Inc. 1.6.0_13-11.3-b02","@Jetty":"6.1.14","@Jersey":"1
+.1.0-ea"}
+
+% curl -H "Accept: application/x-protobuf" http://localhost:8000/version
+
+HTTP/1.1 200 OK
+Content-Length: 113
+Cache-Control: no-cache
+Content-Type: application/x-protobuf
+
+000000 0a 05 30 2e 30 2e 31 12 27 53 75 6e 20 4d 69 63
+000010 72 6f 73 79 73 74 65 6d 73 20 49 6e 63 2e 20 31
+000020 2e 36 2e 30 5f 31 33 2d 31 31 2e 33 2d 62 30 32
+000030 1a 2d 4c 69 6e 75 78 20 32 2e 36 2e 31 38 2d 31
+000040 32 38 2e 31 2e 36 2e 65 6c 35 2e 63 65 6e 74 6f
+000050 73 2e 70 6c 75 73 78 65 6e 20 61 6d 64 36 34 22
+000060 06 36 2e 31 2e 31 34 2a 08 31 2e 31 2e 30 2d 65
+000070 61
+
+
+
+
+
+Query Storage Cluster Version
+
+
+
+GET /version/cluster
+
+
+Returns version information regarding the HBase cluster backing the Stargate instance.
+
+Examples:
+
+
+
+% curl http://localhost:8000/version/cluster
+
+HTTP/1.1 200 OK
+Content-Length: 6
+Cache-Control: no-cache
+Content-Type: text/plain
+
+0.20.0
+
+% curl -H "Accept: text/xml" http://localhost:8000/version/cluster
+
+HTTP/1.1 200 OK
+Cache-Control: no-cache
+Content-Type: text/xml
+Content-Length: 94
+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<ClusterVersion>0.20.0</ClusterVersion>
+
+% curl -H "Accept: application/json" http://localhost:8000/version/cluster
+
+HTTP/1.1 200 OK
+Cache-Control: no-cache
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+"0.20.0"
+
+
+
+
+
+Query Storage Cluster Status
+
+
+
+GET /status/cluster
+
+
+Returns detailed status on the HBase cluster backing the Stargate instance.
+
+Examples:
+
+
+
+% curl http://localhost:8000/status/cluster
+
+
+HTTP/1.1 200 OK
+Content-Length: 839
+Cache-Control: no-cache
+Content-Type: text/plain
+
+1 live servers, 0 dead servers, 13.0000 average load
+
+1 live servers
+ test:37154 1244960965781
+ requests=1, regions=13
+
+ urls,http|www.legacy.com|80|site=Legacy|aamsz=300x250||position=1|prod
+ =1,1244851990859
+ urls,http|weather.boston.com|80|LYNX.js,1244851990859
+ .META.,,1
+ content,601292a839b95e50200d8f8767859864,1244869158156
+ content,9d7f3aeb2a5c1e2b45d690a91de3f23c,1244879698031
+ content,7f6d48830ef51d635e9a5b672e79a083,1244879698031
+ content,3ef16d776603bf9b9e775c9ceb64860f,1244869158156
+ urls,,1244851989250
+ urls,http|groups.google.com|80|groups|img|card_left.gif,1244851989250
+ content,deafed2f90f718d72caaf87bd6c27d04,1244870320343
+ content,bcf91ecf78ea72a33faccfb8e6b5d900,1244870320343
+ -ROOT-,,0
+ content,,1244851999187
+
+
+% curl -H "Accept: text/xml" http://localhost:8000/status/cluster
+
+HTTP/1.1 200 OK
+Cache-Control: no-cache
+Content-Type: text/xml
+Content-Length: 1301
+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<ClusterStatus requests="1" regions="13" averageLoad="13.0"><DeadNodes/><LiveN
+odes><Node startCode="1244960965781" requests="1" name="test:37154"><Region na
+me="dXJscyxodHRwfHd3dy5sZWdhY3kuY29tfDgwfHNpdGU9TGVnYWN5fGFhbXN6PTMwMHgyNTB8YX
+JlYT1DSlDQaElDQUdPVFJJQlVORS4yMXx6b25lPUhvbWV8cG9zaXRpb249MXxwcm9kPTEsMTI0NDg1
+MTk5MDg1OQ=="/><Region name="dXJscyxodHRwfHdlYXRoZXIuYm9zdG9uLmNvbXw4MHxMWU5YL
+mpzLDEyNDQ4NTE5OTA4NTk="/><Region name="Lk1FVEEuLCwx"/><Region name="Y29udGVud
+Cw2MDEyOTJhODM5Yjk1ZTUwMjAwZDhmODc2Nzg1OTg2NCwxMjQ0ODY5MTU4MTU2"/><Region name
+="Y29udGVudCw5ZDdmM2FlYjJhNWMxZTJiNDVkNjkwYTkxZGUzZjIzYywxMjQ0ODc5Njk4MDMx"/><
+Region name="Y29udGVudCw3ZjZkNDg4MzBlZjUxZDYzNWU5YTViNjcyZTc5YTA4MywxMjQ0ODc5N
+jk4MDMx"/><Region name="Y29udGVudCwzZWYxNmQ3NzY2MDNiZjliOWU3NzVjOWNlYjY0ODYwZi
+wxMjQ0ODY5MTU4MTU2"/><Region name="dXJscywsMTI0NDg1MTk4OTI1MA=="/><Region name
+="dXJscyxodHRwfGdyb3Vwcy5nb29nbGUuY29tfDgwfGdyb3Vwc3xpbWd8Y2FyZF9sZWZ0LmdpZiwx
+MjQ0ODUxOTg5MjUw"/><Region name="Y29udGVudCxkZWFmZWQyZjkwZjcxOGQ3MmNhYWY4N2JkN
+mMyN2QwNCwxMjQ0ODcwMzIwMzQz"/><Region name="Y29udGVudCxiY2Y5MWVjZjc4ZWE3MmEzM2
+ZhY2NmYjhlNmI1ZDkwMCwxMjQ0ODcwMzIwMzQz"/><Region name="LVJPT1QtLCww"/><Region
+name="Y29udGVudCwsMTI0NDg1MTk5OTE4Nw=="/></Node></LiveNodes></ClusterStatus>
+
+% curl -H "Accept: application/json" http://localhost:8000/status/cluster
+
+HTTP/1.1 200 OK
+Cache-Control: no-cache
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{"@requests":"1","@regions":"13","@averageLoad":"13.0","DeadNodes":[],"LiveNod
+es":{"Node":{"@startCode":"1244960965781","@requests":"1","@name":"test:37154"
+,"Region":[{"@name":"dXJscyxodHRwfHd3dLmpzy5sZWdhY3kuY29tfDgwfHNpdGU9TGVnYWN5f
+GFhbXN6PTMwMHgyNTB8YXJlYT1DSElDQUdPVFJJQlVORS4yMXx6b25lPUhvbWV8cG9zaXRpb249MXx
+wcm9kPTEsMTI0NDg1MTk5MDg1OQ=="},{"@name":"dXJscyxodHRwfHdlYXRoZXIuYm9zdG9uLmNv
+bXw4MHxMWU5YLmpzLDEyNDQ4NTE5OTA4NTk="},{"@name":"Lk1FVEEuLCwx"},{"@name":"Y29u
+dGVudCw2MDEyOTJhODM5Yjk1ZTUwMjAwZDhmODc2Nzg1OTg2NCwxMjQ0ODY5MTU4MTU2"},{"@name
+":"Y29udGVudCw5ZDdmM2FlYjJhNWMxZTJiNDVkNjkwYTkxZGUzZjIzYywxMjQ0ODc5Njk4MDMx"},
+{"@name":"Y29udGVudCw3ZjZkNDg4MzBlZjUxZDYzNWU5YTViNjcyZTc5YTA4MywxMjQ0ODc5Njk4
+MDMx"},{"@name":"Y29udGVudCwzZWYxNmQ3NzY2MDNiZjliOWU3NzVjOWNlYjY0ODYwZiwxMjQ0O
+DY5MTU4MTU2"},{"@name":"dXJscywsMTI0NDg1MTk4OTI1MA=="},{"@name":"dXJscyxodHRwf
+Gdyb3Vwcy5nb29nbGUuY29tfDgwfGdyb3Vwc3xpbWd8Y2FyZF9sZWZ0LmdpZiwxMjQ0ODUxOTg5MjU
+w"},{"@name":"Y29udGVudCxkZWFmZWQyZjkwZjcxOGQ3MmNhYWY4N2JkNmMyN2QwNCwxMjQ0ODcw
+MzIwMzQz"},{"@name":"Y29udGVudCxiY2Y5MWVjZjc4ZWE3MmEzM2ZhY2NmYjhlNmI1ZDkwMCwxM
+jQ0ODcwMzIwMzQz"},{"@name":"LVJPT1QtLCww"},{"@name":"Y29udGVudCwsMTI0NDg1MTk5O
+TE4Nw=="}]}}}
+
+
+
+
+
+Query Table List
+
+
+
+GET /
+
+
+Retrieves the list of available tables.
+Set Accept header to text/plain for plain text output.
+Set Accept header to text/xml for XML reply.
+Set Accept header to application/json for JSON reply.
+Set Accept header to application/x-protobuf for protobufs.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns the table list in the requested encoding.
+
+Examples:
+
+
+
+% curl http://localhost:8000/
+
+HTTP/1.1 200 OK
+Content-Length: 13
+Cache-Control: no-cache
+Content-Type: text/plain
+
+content
+urls
+
+% curl -H "Accept: text/xml" http://localhost:8000/
+
+HTTP/1.1 200 OK
+Cache-Control: no-cache
+Content-Type: text/xml
+Content-Length: 121
+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<TableList><table name="content"/><table name="urls"/></TableList>
+
+% curl -H "Accept: application/json" http://localhost:8000/
+
+HTTP/1.1 200 OK
+Cache-Control: no-cache
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{"table":[{"name":"content"},{"name":"urls"}]}
+
+% curl -H "Accept: application/x-protobuf" http://localhost:8000/
+
+HTTP/1.1 200 OK
+Content-Length: 15
+Cache-Control: no-cache
+Content-Type: application/x-protobuf
+
+000000 0a 07 63 6f 6e 74 65 6e 74 0a 04 75 72 6c 73
+
+
+
+
+
+Query Table Schema
+
+
+
+GET /<table>/schema
+
+
+Retrieves table schema.
+Set Accept header to text/plain for plain text output.
+Set Accept header to text/xml for XML reply.
+Set Accept header to application/json for JSON reply.
+Set Accept header to application/x-protobuf for protobufs.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns the table schema in the requested encoding.
+
+Examples:
+
+
+
+% curl http://localhost:8000/content/schema
+
+HTTP/1.1 200 OK
+Content-Length: 639
+Cache-Control: no-cache
+Content-Type: text/plain
+
+{ NAME=> 'content', IS_META => 'false', IS_ROOT => 'false', COLUMNS => [ { NA
+ME => 'content', BLOCKSIZE => '65536', BLOOMFILTER => 'false', BLOCKCACHE =>
+'false', COMPRESSION => 'GZ', LENGTH => '2147483647', VERSIONS => '1', TTL =>
+'-1', IN_MEMORY => 'false' }, { NAME => 'info', BLOCKSIZE => '65536', BLOOMFI
+LTER => 'false', BLOCKCACHE => 'false', COMPRESSION => 'NONE', LENGTH => '214
+7483647', VERSIONS => '1', TTL => '-1', IN_MEMORY => 'false' }, { NAME => 'ur
+l', BLOCKSIZE => '65536', BLOOMFILTER => 'false', BLOCKCACHE => 'false', COMP
+RESSION => 'NONE', LENGTH => '2147483647', VERSIONS => '1', TTL => '-1', IN_
+MEMORY => 'false' } ] }
+
+% curl -H "Accept: text/xml" http://localhost:8000/content/schema
+
+HTTP/1.1 200 OK
+Cache-Control: no-cache
+Content-Type: text/xml
+Content-Length: 618
+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<TableSchema name="content" IS_META="false" IS_ROOT="false"><ColumnSchema nam
+e="content" BLOCKSIZE="65536" BLOOMFILTER="false" BLOCKCACHE="false" COMPRESS
+ION="GZ" LENGTH="2147483647" VERSIONS="1" TTL="-1" IN_MEMORY="false"/><Column
+Schema name="info" BLOCKSIZE="65536" BLOOMFILTER="false" BLOCKCACHE="false" C
+OMPRESSION="NONE" LENGTH="2147483647" VERSIONS="1" TTL="-1" IN_MEMORY="false"
+/><ColumnSchema name="url" BLOCKSIZE="65536" BLOOMFILTER="false"BLOCKCACHE="f
+alse" COMPRESSION="NONE" LENGTH="2147483647" VERSIONS="1" TTL="-1" IN_MEMORY=
+"false"/></TableSchema>
+
+% curl -H "Accept: application/json" http://localhost:8000/content/schema
+
+HTTP/1.1 200 OK
+Cache-Control: no-cache
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{"@name":"content","@IS_META":"false","@IS_ROOT":"false","ColumnSchema":[{"@n
+ame":"content","@BLOCKSIZE":"65536","@BLOOMFILTER":"false","@BLOCKCACHE":"fal
+se","@COMPRESSION":"GZ","@LENGTH":"2147483647","@VERSIONS":"1","@TTL":"-1","@
+IN_MEMORY":"false"},{"@name":"info","@BLOCKSIZE":"65536","@BLOOMFILTER":"fals
+e","@BLOCKCACHE":"false","@COMPRESSION":"NONE","@LENGTH":"2147483647","@VERSI
+ONS":"1","@TTL":"-1","@IN_MEMORY":"false"},{"@name":"url","@BLOCKSIZE":"65536
+","@BLOOMFILTER":"false","@BLOCKCACHE":"false","@COMPRESSION":"NONE","@LENGTH
+":"2147483647","@VERSIONS":"1","@TTL":"-1","@IN_MEMORY":"false"}]}
+
+% curl -H "Accept: application/x-protobuf" http://localhost:8000/content/schema
+
+HTTP/1.1 200 OK
+Content-Length: 563
+Cache-Control: no-cache
+Content-Type: application/x-protobuf
+
+000000 0a 07 63 6f 6e 74 65 6e 74 12 10 0a 07 49 53 5f
+000010 4d 45 54 41 12 05 66 61 6c 73 65 12 10 0a 07 49
+000020 53 5f 52 4f 4f 54 12 05 66 61 6c 73 65 1a a7 01
+000030 12 12 0a 09 42 4c 4f 43 4b 53 49 5a 45 12 05 36
+[...]
+000230 4f 4e 45
+
+
+
+
+
+Create Table Or Update Table Schema
+
+
+
+PUT /<table>/schema
+
+POST /<table>/schema
+
+
+Uploads table schema.
+PUT or POST creates table as necessary.
+PUT fully replaces schema.
+POST modifies schema (add or modify column family).
+Supply the full table schema for PUT or a well formed schema fragment for POST
+in the desired encoding.
+Set Content-Type header to text/xml if the desired encoding is XML.
+Set Content-Type header to application/json if the desired encoding
+is JSON.
+Set Content-Type header to application/x-protobuf if the desired
+encoding is protobufs.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns HTTP 200 status.
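+
+A minimal illustrative example (the table name test, column family data, and
+attribute values below are hypothetical; supply any valid schema document for
+your table):
+
+% curl -X PUT -H "Content-Type: text/xml" -d '<?xml version="1.0" encoding="UTF-8"?><TableSchema name="test"><ColumnSchema name="data" VERSIONS="3"/></TableSchema>' http://localhost:8000/test/schema
+
+HTTP/1.1 200 OK
+Content-Length: 0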
+
+
+
+Query Table Metadata
+
+
+
+GET /<table>/regions
+
+
+Retrieves table region metadata.
+Set Accept header to text/plain for plain text output.
+Set Accept header to text/xml for XML reply.
+Set Accept header to application/json for JSON reply.
+Set Accept header to application/x-protobuf for protobufs.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns the table region metadata in the requested encoding.
+
+Examples:
+
+
+
+% curl -H "Accept: text/xml" http://localhost:8000/content/regions
+
+HTTP/1.1 200 OK
+Cache-Control: no-cache
+Content-Type: text/xml
+Content-Length: 1555
+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<TableInfo name="content"><Region location="test:51025" endKey="M2VmMTZkNzc2Nj
+AzYmY5YjllNzc1YzljZWI2NDg2MGY=" startKey="" id="1244851999187" name="content,,
+1244851999187"/><Region location="test:51025" endKey="NjAxMjkyYTgzOWI5NWU1MDIw
+MGQ4Zjg3Njc4NTk4NjQ=" startKey="M2VmMTZkNzc2NjAzYmY5YjllNzc1YzljZWI2NDg2MGY="
+id="1244869158156" name="content,3ef16d776603bf9b9e775c9ceb64860f,124486915815
+6"/><Region location="test:51025" endKey="N2Y2ZDQ4ODMwZWY1MWQ2MzVlOWE1YjY3MmU3
+OWEwODM=" startKey="NjAxMjkyYTgzOWI5NWU1MDIwMGQ4Zjg3Njc4NTk4NjQ=" id="12448691
+58156" name="content,601292a839b95e50200d8f8767859864,1244869158156"/><Region
+location="test:51025" endKey="OWQ3ZjNhZWIyYTVjMWUyYjQ1ZDY5MGE5MWRlM2YyM2M=" st
+artKey="N2Y2ZDQ4ODMwZWY1MWQ2MzVlOWE1YjY3MmU3OWEwODM=" id="1244879698031" name=
+"content,7f6d48830ef51d635e9a5b672e79a083,1244879698031"/><Region location="te
+st:51025" endKey="YmNmOTFlY2Y3OGVhNzJhMzNmYWNjZmI4ZTZiNWQ5MDA=" startKey="OWQ3
+ZjNhZWIyYTVjMWUyYjQ1ZDY5MGE5MWRlM2YyM2M=" id="1244879698031" name="content,9d7
+f3aeb2a5c1e2b45d690a91de3f23c,1244879698031"/><Region location="test:51025" en
+dKey="ZGVhZmVkMmY5MGY3MThkNzJjYWFmODdiZDZjMjdkMDQ=" startKey="YmNmOTFlY2Y3OGVh
+NzJhMzNmYWNjZmI4ZTZiNWQ5MDA=" id="1244870320343" name="content,bcf91ecf78ea72a
+33faccfb8e6b5d900,1244870320343"/><Region location="test:51025" endKey="" star
+tKey="ZGVhZmVkMmY5MGY3MThkNzJjYWFmODdiZDZjMjdkMDQ=" id="1244870320343" name="c
+ontent,deafed2f90f718d72caaf87bd6c27d04,1244870320343"/></TableInfo>
+
+% curl -H "Accept: application/json" http://localhost:8000/content/regions
+
+HTTP/1.1 200 OK
+Cache-Control: no-cache
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{"@name":"content","Region":[{"@location":"test:51025","@endKey":"M2VmMTZkNzc2
+NjAzYmY5YjllNzc1YzljZWI2NDg2MGY=","@startKey":"","@id":"1244851999187","@name"
+:"content,,1244851999187"},{"@location":"test:51025","@endKey":"NjAxMjkyYTgzOW
+I5NWU1MDIwMGQ4Zjg3Njc4NTk4NjQ=","@startKey":"M2VmMTZkNzc2NjAzYmY5YjllNzc1YzljZ
+WI2NDg2MGY=","@id":"1244869158156","@name":"content,3ef16d776603bf9b9e775c9ceb
+64860f,1244869158156"},{"@location":"test:51025","@endKey":"N2Y2ZDQ4ODMwZWY1MW
+Q2MzVlOWE1YjY3MmU3OWEwODM=","@startKey":"NjAxMjkyYTgzOWI5NWU1MDIwMGQ4Zjg3Njc4N
+Tk4NjQ=","@id":"1244869158156","@name":"content,601292a839b95e50200d8f87678598
+64,1244869158156"},{"@location":"test:51025","@endKey":"OWQ3ZjNhZWIyYTVjMWUyYj
+Q1ZDY5MGE5MWRlM2YyM2M=","@startKey":"N2Y2ZDQ4ODMwZWY1MWQ2MzVlOWE1YjY3MmU3OWEwO
+DM=","@id":"1244879698031","@name":"content,7f6d48830ef51d635e9a5b672e79a083,1
+244879698031"},{"@location":"test:51025","@endKey":"YmNmOTFlY2Y3OGVhNzJhMzNmYW
+NjZmI4ZTZiNWQ5MDA=","@startKey":"OWQ3ZjNhZWIyYTVjMWUyYjQ1ZDY5MGE5MWRlM2YyM2M="
+,"@id":"1244879698031","@name":"content,9d7f3aeb2a5c1e2b45d690a91de3f23c,12448
+79698031"},{"@location":"test:51025","@endKey":"ZGVhZmVkMmY5MGY3MThkNzJjYWFmOD
+diZDZjMjdkMDQ=","@startKey":"YmNmOTFlY2Y3OGVhNzJhMzNmYWNjZmI4ZTZiNWQ5MDA=","@i
+d":"1244870320343","@name":"content,bcf91ecf78ea72a33faccfb8e6b5d900,124487032
+0343"},{"@location":"test:51025","@endKey":"","@startKey":"ZGVhZmVkMmY5MGY3MTh
+kNzJjYWFmODdiZDZjMjdkMDQ=","@id":"1244870320343","@name":"content,deafed2f90f7
+18d72caaf87bd6c27d04,1244870320343"}]}
+
+% curl -H "Accept: application/x-protobuf" http://localhost:8000/content/regions
+
+HTTP/1.1 200 OK
+Content-Length: 961
+Cache-Control: no-cache
+Content-Type: application/x-protobuf
+
+000000 0a 07 63 6f 6e 74 65 6e 74 12 53 0a 16 63 6f 6e
+000010 74 65 6e 74 2c 2c 31 32 34 34 38 35 31 39 39 39
+000020 31 38 37 12 00 1a 20 33 65 66 31 36 64 37 37 36
+000030 36 30 33 62 66 39 62 39 65 37 37 35 63 39 63 65
+[...]
+0003c0 35
+
+
+
+
+
+Delete Table
+
+
+
+DELETE /<table>/schema
+
+
+Deletes a table.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns HTTP 200 status.
+
+NOTE: DELETE /<table> will not work; use DELETE /<table>/schema as shown above.
+
+Examples:
+
+
+
+% telnet localhost 8000
+DELETE http://localhost:8000/test/schema HTTP/1.0
+
+HTTP/1.1 200 OK
+Content-Length: 0
+
+
+
+
+
+Cell Query (Single Value)
+
+
+
+GET /<table>/<row>/
+ <column> ( : <qualifier> )?
+ ( / <timestamp> )?
+
+
+Retrieves one cell, with optional specification of timestamp.
+Set Accept header to text/xml for XML reply.
+Set Accept header to application/x-protobuf for protobufs.
+Set Accept header to application/octet-stream for binary.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns HTTP 200 status and cell data in the response body in
+the requested encoding. If the encoding is binary, returns row, column, and
+timestamp in X headers: X-Row , X-Column , and
+X-Timestamp , respectively. Depending on the precision of the resource
+specification, some of the X-headers may be elided as redundant.
+
+Examples:
+
+
+
+% curl -H "Accept: text/xml" http://localhost:8000/content/00012614f7d43df6418523445a6787d6/content:raw
+
+HTTP/1.1 200 OK
+Cache-Control: max-age=14400
+Content-Type: text/xml
+Content-Length: 521
+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<CellSet><Row key="MDAwMTI2MTRmN2Q0M2RmNjQxODUyMzQ0NWE2Nzg3ZDY="><Cell timesta
+mp="1244880122250" column="Y29udGVudDpyYXc=">PCFET0NUWVBFIEhUTUwgUFVCTElDICItL
+y9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT4zMDEgTW92ZWQgUGV
+ybWFuZW50bHk8L3RpdGxlPgo8L2hlYWQ+PGJvZHk+CjxoMT5Nb3ZlZCBQZXJtYW5lbnRseTwvaDE+C
+jxwPlRoZSBkb2N1bWVudCBoYXMgbW92ZWQgPGEgaHJlZj0iaHR0cDovL3R3aXR0ZXIuY29tL2R1bmN
+hbnJpbGV5Ij5oZXJlPC9hPi48L3A+CjwvYm9keT48L2h0bWw+Cg==</Cell></Row></CellSet>
+
+% curl -H "Accept: application/json" http://localhost:8000/content/00012614f7d43df6418523445a6787d6/content:raw
+
+HTTP/1.1 200 OK
+Cache-Control: max-age=14400
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{"Row":{"@key":"MDAwMTI2MTRmN2Q0M2RmNjQxODUyMzQ0NWE2Nzg3ZDY=","Cell":{"@timest
+amp":"1244880122250","@column":"Y29udGVudDpyYXc=","$":"PCFET0NUWVBFIEhUTUwgUFV
+CTElDICItLy9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT4zMDEgT
+W92ZWQgUGVybWFuZW50bHk8L3RpdGxlPgo8L2hlYWQ+PGJvZHk+CjxoMT5Nb3ZlZCBQZXJtYW5lbnR
+seTwvaDE+CjxwPlRoZSBkb2N1bWVudCBoYXMgbW92ZWQgPGEgaHJlZj0iaHR0cDovL3R3aXR0ZXIuY
+29tL2R1bmNhbnJpbGV5Ij5oZXJlPC9hPi48L3A+CjwvYm9keT48L2h0bWw+Cg=="}}}
+
+% curl -H "Accept: application/x-protobuf" http://localhost:8000/content/00012614f7d43df6418523445a6787d6/content:raw
+
+HTTP/1.1 200 OK
+Content-Length: 301
+Cache-Control: max-age=14400
+Content-Type: application/x-protobuf
+
+000000 0a aa 02 0a 20 30 30 30 31 32 36 31 34 66 37 64
+000010 34 33 64 66 36 34 31 38 35 32 33 34 34 35 61 36
+000020 37 38 37 64 36 12 85 02 12 0b 63 6f 6e 74 65 6e
+000030 74 3a 72 61 77 18 8a e3 8c c5 9d 24 22 ee 01 3c
+[...]
+000120 62 6f 64 79 3e 3c 2f 68 74 6d 6c 3e 0a
+
+% curl -H "Accept: application/octet-stream" http://localhost:8000/content/00012614f7d43df6418523445a6787d6/content:raw
+
+HTTP/1.1 200 OK
+Content-Length: 238
+Cache-Control: max-age=14400
+X-Timestamp: 1244880122250
+Content-Type: application/octet-stream
+
+[...]
+
+
+
+
+
+Cell or Row Query (Multiple Values)
+
+
+
+GET /<table>/<row>
+ ( / ( <column> ( : <qualifier> )?
+ ( , <column> ( : <qualifier> )? )+ )?
+ ( / ( <start-timestamp> ',' )? <end-timestamp> )? )?
+ ( ?v= <num-versions> )?
+
+
+Retrieves one or more cells from a full row, or one or more specified columns
+in the row, with optional filtering via timestamp, and an optional restriction
+on the maximum number of versions to return.
+Set Accept header to text/xml for XML reply.
+Set Accept header to application/json for JSON reply.
+Set Accept header to application/x-protobuf for protobufs.
+Set Accept header to application/octet-stream for binary.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns row results in the requested encoding.
+
+NOTE: If binary encoding is requested, only one cell can be returned, the
+first to match the resource specification. The row, column, and timestamp
+associated with the cell will be transmitted in X headers: X-Row ,
+X-Column , and X-Timestamp , respectively. Depending on the
+precision of the resource specification, some of the X-headers may be elided
+as redundant.
+
+Suffix Globbing
+
+Multiple value queries of a row can optionally append a suffix glob on the row
+key. This is a restricted form of scanner which will return all values in all
+rows that have keys which contain the supplied key on their left hand side,
+for example:
+
+
+ org.someorg.*
+ -> org.someorg.blog
+ -> org.someorg.home
+ -> org.someorg.www
+
+
+Examples:
+
+
+
+% curl -H "Accept: text/xml" http://localhost:8000/urls/https|ad.doubleclick.net|*
+
+HTTP/1.1 200 OK
+Cache-Control: max-age=14400
+Content-Type: text/xml
+Transfer-Encoding: chunked
+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<CellSet><Row key="aHR0cHx3d3cudGVsZWdyYXBoLmNvLnVrfDgwfG5ld3N8d29ybGRuZXdzfG5
+vcnRoYW1lcmljYXx1c2F8NTQ5MTI4NHxBcm5vbGQtU2Nod2FyemVuZWdnZXItdW52ZWlscy1wYXBlc
+mxlc3MtY2xhc3Nyb29tcy1wbGFuLmh0bWw="><Cell timestamp="1244701257843" column="a
+W5mbzpjcmF3bGVyLTEyNDQ3MDEyNTc4NDM=">eyJpcCI6IjIwOC41MS4xMzcuOSIsIm1pbWV0eXBlI
+joidGV4dC9odG1sO2NoYXJzZXQ9SVNPLT
+[...]
+</Cell><Cell timestamp="1244701513390" column="aW5mbzp1cmw=">aHR0cDovL3d3dy50Z
+WxlZ3JhcGguY28udWs6ODAvdGVsZWdyYXBoL3RlbXBsYXRlL3ZlcjEtMC90ZW1wbGF0ZXMvZnJhZ21
+lbnRzL2NvbW1vbi90bWdsQnJhbmRDU1MuanNw</Cell></Row></CellSet>
+
+% curl -H "Accept: text/xml" http://localhost:8000/content/00012614f7d43df6418523445a6787d6
+
+HTTP/1.1 200 OK
+Cache-Control: max-age=14400
+Content-Type: text/xml
+Content-Length: 1177
+
+<CellSet><Row key="MDAwMTI2MTRmN2Q0M2RmNjQxODUyMzQ0NWE2Nzg3ZDY="><Cell timesta
+mp="1244880122250" column="Y29udGVudDpyYXc=">PCFET0NUWVBFIEhUTUwgUFVCTElDICItL
+y9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT4zMDEgTW92ZWQgUGV
+ybWFuZW50bHk8L3RpdGxlPgo8L2hlYWQ+PGJvZHk+CjxoMT5Nb3ZlZCBQZXJtYW5lbnRseTwvaDE+C
+jxwPlRoZSBkb2N1bWVudCBoYXMgbW92ZWQgPGEgaHJlZj0iaHR0cDovL3R3aXR0ZXIuY29tL2R1bmN
+hbnJpbGV5Ij5oZXJlPC9hPi48L3A+CjwvYm9keT48L2h0bWw+Cg==</Cell><Cell timestamp="1
+244880122250" column="aW5mbzpjcmF3bGVyLWh0dHB8d3d3LnR3aXR0ZXIuY29tfDgwfGR1bmNh
+bnJpbGV5LTEyNDQ4ODAxMjIyNTA=">eyJpcCI6IjE2OC4xNDMuMTYyLjY4IiwibWltZXR5cGUiOiJ0
+ZXh0L2h0bWw7IGNoYXJzZXQ9aXNvLTg4NTktMSIsInZpYSI6Imh0dHA6Ly93d3cuaW5xdWlzaXRyLm
+NvbTo4MC8yNTkyNy90b3NoMC1hbmQtdGhlLWRlbWktbW9vcmUtbnNmdy1waWMvIn0=</Cell><Cell
+timestamp="1244880122250" column="aW5mbzpsZW5ndGg=">MjM4</Cell><Cell timestamp
+="1244880122250" column="aW5mbzptaW1ldHlwZQ==">dGV4dC9odG1sOyBjaGFyc2V0PWlzby0
+4ODU5LTE=</Cell><Cell timestamp="1244880122250" column="dXJsOmh0dHB8d3d3LnR3aX
+R0ZXIuY29tfDgwfGR1bmNhbnJpbGV5">aHR0cDovL3d3dy50d2l0dGVyLmNvbTo4MC9kdW5jYW5yaW
+xleQ==</Cell></Row></CellSet>
+
+% curl -H "Accept: application/json" http://localhost:8000/content/00012614f7d43df6418523445a6787d6
+
+HTTP/1.1 200 OK
+Cache-Control: max-age=14400
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{"Row":{"@key":"MDAwMTI2MTRmN2Q0M2RmNjQxODUyMzQ0NWE2Nzg3ZDY=","Cell":[{"@times
+tamp":"1244880122250","@column":"Y29udGVudDpyYXc=","$":"PCFET0NUWVBFIEhUTUwgUF
+VCTElDICItLy9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT4zMDEg
+TW92ZWQgUGVybWFuZW50bHk8L3RpdGxlPgo8L2hlYWQ+PGJvZHk+CjxoMT5Nb3ZlZCBQZXJtYW5lbn
+RseTwvaDE+CjxwPlRoZSBkb2N1bWVudCBoYXMgbW92ZWQgPGEgaHJlZj0iaHR0cDovL3R3aXR0ZXIu
+Y29tL2R1bmNhbnJpbGV5Ij5oZXJlPC9hPi48L3A+CjwvYm9keT48L2h0bWw+Cg=="},{"@timestam
+p":"1244880122250","@column":"aW5mbzpjcmF3bGVyLWh0dHB8d3d3LnR3aXR0ZXIuY29tfDgw
+fGR1bmNhbnJpbGV5LTEyNDQ4ODAxMjIyNTA=","$":"eyJpcCI6IjE2OC4xNDMuMTYyLjY4IiwibWl
+tZXR5cGUiOiJ0ZXh0L2h0bWw7IGNoYXJzZXQ9aXNvLTg4NTktMSIsInZpYSI6Imh0dHA6Ly93d3cua
+W5xdWlzaXRyLmNvbTo4MC8yNTkyNy90b3NoMC1hbmQtdGhlLWRlbWktbW9vcmUtbnNmdy1waWMvIn0
+="},{"@timestamp":"1244880122250","@column":"aW5mbzpsZW5ndGg=","$":"MjM4"},{"@
+timestamp":"1244880122250","@column":"aW5mbzptaW1ldHlwZQ==","$":"dGV4dC9odG1sO
+yBjaGFyc2V0PWlzby04ODU5LTE="},{"@timestamp":"1244880122250","@column":"dXJsOmh
+0dHB8d3d3LnR3aXR0ZXIuY29tfDgwfGR1bmNhbnJpbGV5","$":"aHR0cDovL3d3dy50d2l0dGVyLm
+NvbTo4MC9kdW5jYW5yaWxleQ=="}]}}
+
+
+NOTE: The cell value is given in JSON encoding as the value associated with the key "$".
+
+
+% curl -H "Accept: application/x-protobuf" http://localhost:8000/content/00012614f7d43df6418523445a6787d6
+
+HTTP/1.1 200 OK
+Content-Length: 692
+Cache-Control: max-age=14400
+Content-Type: application/x-protobuf
+
+000000 0a b1 05 0a 20 30 30 30 31 32 36 31 34 66 37 64
+000010 34 33 64 66 36 34 31 38 35 32 33 34 34 35 61 36
+000020 37 38 37 64 36 12 85 02 12 0b 63 6f 6e 74 65 6e
+000030 74 3a 72 61 77 18 8a e3 8c c5 9d 24 22 ee 01 3c
+[...]
+0002b0 69 6c 65 79
+
+
+
+
+
+Cell Store (Single)
+
+
+
+PUT /<table>/<row>/<column>( : <qualifier> )? ( / <timestamp> )?
+
+POST /<table>/<row>/<column>( : <qualifier> )? ( / <timestamp> )?
+
+
+Stores cell data into the specified location.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns HTTP 200 status.
+Set Content-Type header to text/xml for XML encoding.
+Set Content-Type header to application/x-protobuf for protobufs encoding.
+Set Content-Type header to application/octet-stream for binary encoding.
+When using binary encoding, optionally, set X-Timestamp header to the desired
+timestamp.
+
+PUT and POST operations are equivalent here: Specified addresses without
+existing data will create new values. Specified addresses with existing data
+will create new versions, overwriting an existing version if all of { row,
+column:qualifier, timestamp } match that of the existing value.
+
+See "Cell Query (Single Value)" section for encoding examples.
+
+Examples:
+
+
+
+% curl -H "Content-Type: text/xml" --data '[...]' http://localhost:8000/test/testrow/test:testcolumn
+
+HTTP/1.1 200 OK
+Content-Length: 0
+
+
+
+
+
+Cell Store (Multiple)
+
+
+
+PUT /<table>/<false-row-key>
+
+POST /<table>/<false-row-key>
+
+
+Use a false row key. Row, column, and timestamp values in supplied cells
+override the specifications of the same on the path, allowing for posting of
+multiple values to a table in batch. If not successful, returns appropriate
+HTTP error status code. If successful, returns HTTP 200 status.
+Set Content-Type to text/xml for XML encoding.
+Set Content-Type header to application/x-protobuf for protobufs encoding.
+Supply commit data in the PUT or POST body.
+
+PUT and POST operations are equivalent here: Specified addresses without
+existing data will create new values. Specified addresses with existing data
+will create new versions, overwriting an existing version if all of { row,
+column:qualifier, timestamp } match that of the existing value.
+
+See "Cell or Row Query (Multiple Values)" for encoding examples.
+
+
+
+Row, Column, or Cell Delete
+
+
+
+DELETE /<table>/<row>
+ ( / ( <column> ( : <qualifier> )?
+ ( / <timestamp> )? )?
+
+
+Deletes an entire row, an entire column family, or specific cell(s), depending
+on how specific the data address is. If not successful, returns appropriate HTTP
+error status code. If successful, returns HTTP 200 status.
+
+NOTE: DELETE /<table> will not work.
+Use DELETE /<table>/schema instead.
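+
+For example, deleting a single column from a row (the table, row, and column
+names below are illustrative):
+
+% curl -X DELETE http://localhost:8000/test/testrow/test:testcolumn
+
+HTTP/1.1 200 OK
+Content-Length: 0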
+
+
+
+Scanner Creation
+
+
+
+PUT /<table>/scanner
+
+POST /<table>/scanner
+
+
+Allocates a new table scanner.
+If not successful, returns appropriate HTTP error status code.
+If successful, returns HTTP 201 status (created) and the URI which should be
+used to address the scanner, e.g.
+
+
+/<table>/scanner/112876541342014107c0fa92
+
+Set Content-Type to text/xml if supplying an XML scanner specification.
+Set Content-Type to application/x-protobuf if supplying a protobufs
+encoded specification.
+
+Examples:
+
+
+
+% curl -H "Content-Type: text/xml" -d '<Scanner batch="1"/>' http://localhost:8000/content/scanner
+
+HTTP/1.1 201 Created
+Location: http://localhost:8000/content/scanner/12447063229213b1937
+Content-Length: 0
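+
+A scanner specification may also restrict the row range; for example (the
+base64 row keys and the returned scanner id below are illustrative):
+
+% curl -H "Content-Type: text/xml" -d '<Scanner batch="10" startRow="cm93MDA=" endRow="cm93MTA="/>' http://localhost:8000/content/scanner
+
+HTTP/1.1 201 Created
+Location: http://localhost:8000/content/scanner/12447063229213b1938
+Content-Length: 0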
+
+
+
+
+
+Scanner Get Next
+
+
+
+GET /<table>/scanner/<scanner-id>
+
+
+Returns the values of the next cells found by the scanner, up to the configured batch amount.
+Set Accept header to text/xml for XML encoding.
+Set Accept header to application/x-protobuf for protobufs encoding.
+Set Accept header to application/octet-stream for binary encoding.
+If not successful, returns appropriate HTTP error status code.
+If result is successful but the scanner is exhausted, returns HTTP 204 status (no content).
+Otherwise, returns HTTP 200 status and row and cell data in the response body.
+See examples from the "Cell or Row Query (Multiple Values)" section.
+
+NOTE: The binary encoding option returns only one cell regardless of the
+batching parameter supplied during scanner creation. The row, column, and
+timestamp associated with the cell are transmitted as X-headers:
+X-Row , X-Column , and X-Timestamp respectively.
+
+Examples:
+
+
+
+% curl -H "Content-Type: text/xml" http://localhost:8000/content/scanner/12447063229213b1937
+
+HTTP/1.1 200 OK
+Cache-Control: no-cache
+Content-Type: text/xml
+Content-Length: 589
+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<CellSet><Row key="MDAyMDFjMTAwNjk4ZGNkYjU5MDQxNTVkZGQ3OGRlZTk="><Cell timesta
+mp="1244701281234" column="Y29udGVudDpyYXc=">PCFET0NUWVBFIEhUTUwgUFVCTElDICItL
+y9JRVRGLy9EVEQgSFRNTCAyLjAvL0VOIj4KPGh0bWw+PGhlYWQ+Cjx0aXRsZT40MDQgTm90IEZvdW5
+kPC90aXRsZT4KPC9oZWFkPjxib2R5Pgo8aDE+Tm90IEZvdW5kPC9oMT4KPHA+VGhlIHJlcXVlc3RlZ
+CBVUkwgL3JvYm90cy50eHQgd2FzIG5vdCBmb3VuZCBvbiB0aGlzIHNlcnZlci48L3A+Cjxocj4KPGF
+kZHJlc3M+QXBhY2hlLzIuMi4zIChSZWQgSGF0KSBTZXJ2ZXIgYXQgd3gubWduZXR3b3JrLmNvbSBQb
+3J0IDgwPC9hZGRyZXNzPgo8L2JvZHk+PC9odG1sPgo=</Cell></Row></CellSet>
+
+% curl -H "Content-Type: application/json" http://localhost:8000/content/scanner/12447063229213b1937
+
+HTTP/1.1 200 OK
+Cache-Control: no-cache
+Content-Type: application/json
+Transfer-Encoding: chunked
+
+{"Row":{"@key":"MDAyMDFjMTAwNjk4ZGNkYjU5MDQxNTVkZGQ3OGRlZTk=","Cell":{"@timest
+amp":"1244701281234","@column":"aW5mbzpjcmF3bGVyLWh0dHB8d3gubWduZXR3b3JrLmNvbX
+w4MHxyb2JvdHMudHh0LTEyNDQ3MDEyODEyMzQ=","$":"eyJpcCI6IjE5OS4xOTMuMTAuMTAxIiwib
+WltZXR5cGUiOiJ0ZXh0L2h0bWw7IGNoYXJzZXQ9aXNvLTg4NTktMSIsInZpYSI6Imh0dHA6Ly93eC5
+tZ25ldHdvcmsuY29tOjgwL2pzL2N1cnJlbnRzaGFuZGxlci5qcyJ9"}}}
+
+% curl -H "Content-Type: application/x-protobuf" http://localhost:8000/content/scanner/12447063229213b1937
+
+HTTP/1.1 200 OK
+Content-Length: 63
+Cache-Control: no-cache
+Content-Type: application/x-protobuf
+
+000000 0a 3d 0a 20 30 30 32 30 31 63 31 30 30 36 39 38
+000010 64 63 64 62 35 39 30 34 31 35 35 64 64 64 37 38
+000020 64 65 65 39 12 19 12 0b 69 6e 66 6f 3a 6c 65 6e
+000030 67 74 68 18 d2 97 e9 ef 9c 24 22 03 32 39 30
+
+% curl -H "Content-Type: application/octet-stream" http://localhost:8000/content/scanner/12447063229213b1937
+
+HTTP/1.1 200 OK
+Content-Length: 37
+Cache-Control: no-cache
+X-Column: dXJsOmh0dHB8d3gubWduZXR3b3JrLmNvbXw4MHxyb2JvdHMudHh0
+X-Row: MDAyMDFjMTAwNjk4ZGNkYjU5MDQxNTVkZGQ3OGRlZTk=
+X-Timestamp: 1244701281234
+Content-Type: application/octet-stream
+
+000000 68 74 74 70 3a 2f 2f 77 78 2e 6d 67 6e 65 74 77
+000010 6f 72 6b 2e 63 6f 6d 3a 38 30 2f 72 6f 62 6f 74
+000020 73 2e 74 78 74
+
+
+
+
+
+Scanner Deletion
+
+
+
+DELETE /<table>/scanner/<scanner-id>
+
+
+Deletes resources associated with the scanner. This is an optional action.
+Scanners will expire after some globally configurable interval has elapsed
+with no activity on the scanner. If not successful, returns appropriate HTTP
+error status code. If successful, returns HTTP status 200.
+
+Examples:
+
+
+
+% telnet localhost 8000
+DELETE http://localhost:8000/content/scanner/12447063229213b1937 HTTP/1.0
+
+HTTP/1.1 200 OK
+Content-Length: 0
+
+
+
+
+
+
+XML Schema
+
+
+
+<schema targetNamespace="StargateSchema" elementFormDefault="qualified"
+xmlns="http://www.w3.org/2001/XMLSchema" xmlns:tns="StargateSchema">
+
+ <element name="CellSet" type="tns:CellSet"></element>
+
+ <complexType name="CellSet">
+ <sequence>
+ <element name="row" type="tns:Row" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ </complexType>
+
+ <complexType name="Row">
+ <sequence>
+ <element name="key" type="base64Binary"></element>
+ <element name="cell" type="tns:Cell" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ </complexType>
+
+ <complexType name="Cell">
+ <sequence>
+ <element name="value" maxOccurs="1" minOccurs="1"><simpleType><restriction base="base64Binary"></restriction></simpleType></element>
+ </sequence>
+ <attribute name="column" type="base64Binary" />
+ <attribute name="timestamp" type="int" />
+ </complexType>
+
+ <element name="Version" type="tns:Version"></element>
+
+ <complexType name="Version">
+ <attribute name="Stargate" type="string"></attribute>
+ <attribute name="JVM" type="string"></attribute>
+ <attribute name="OS" type="string"></attribute>
+ <attribute name="Server" type="string"></attribute>
+ <attribute name="Jersey" type="string"></attribute>
+ </complexType>
+
+
+ <element name="TableList" type="tns:TableList"></element>
+
+ <complexType name="TableList">
+ <sequence>
+ <element name="table" type="tns:Table" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ </complexType>
+
+ <complexType name="Table">
+ <sequence>
+ <element name="name" type="string"></element>
+ </sequence>
+ </complexType>
+
+ <element name="TableInfo" type="tns:TableInfo"></element>
+
+ <complexType name="TableInfo">
+ <sequence>
+ <element name="region" type="tns:TableRegion" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ <attribute name="name" type="string"></attribute>
+ </complexType>
+
+ <complexType name="TableRegion">
+ <attribute name="name" type="string"></attribute>
+ <attribute name="id" type="int"></attribute>
+ <attribute name="startKey" type="base64Binary"></attribute>
+ <attribute name="endKey" type="base64Binary"></attribute>
+ <attribute name="location" type="string"></attribute>
+ </complexType>
+
+ <element name="TableSchema" type="tns:TableSchema"></element>
+
+ <complexType name="TableSchema">
+ <sequence>
+ <element name="column" type="tns:ColumnSchema" maxOccurs="unbounded" minOccurs="1"></element>
+ </sequence>
+ <attribute name="name" type="string"></attribute>
+ <anyAttribute></anyAttribute>
+ </complexType>
+
+ <complexType name="ColumnSchema">
+ <attribute name="name" type="string"></attribute>
+ <anyAttribute></anyAttribute>
+ </complexType>
+
+ <element name="Scanner" type="tns:Scanner"></element>
+
+ <complexType name="Scanner">
+ <attribute name="startRow" type="base64Binary"></attribute>
+ <attribute name="endRow" type="base64Binary"></attribute>
+ <attribute name="columns" type="base64Binary"></attribute>
+ <attribute name="batch" type="int"></attribute>
+ <attribute name="startTime" type="int"></attribute>
+ <attribute name="endTime" type="int"></attribute>
+ </complexType>
+
+ <element name="StorageClusterVersion"
+ type="tns:StorageClusterVersion">
+ </element>
+
+ <complexType name="StorageClusterVersion">
+ <attribute name="version" type="string"></attribute>
+ </complexType>
+
+ <element name="StorageClusterStatus"
+ type="tns:StorageClusterStatus">
+ </element>
+
+ <complexType name="StorageClusterStatus">
+ <sequence>
+ <element name="liveNode" type="tns:Node"
+ maxOccurs="unbounded" minOccurs="0">
+ </element>
+ <element name="deadNode" type="string" maxOccurs="unbounded"
+ minOccurs="0">
+ </element>
+ </sequence>
+ <attribute name="regions" type="int"></attribute>
+ <attribute name="requests" type="int"></attribute>
+ <attribute name="averageLoad" type="float"></attribute>
+ </complexType>
+
+ <complexType name="Node">
+ <sequence>
+ <element name="region" type="tns:Region" maxOccurs="unbounded" minOccurs="0"></element>
+ </sequence>
+ <attribute name="name" type="string"></attribute>
+ <attribute name="startCode" type="int"></attribute>
+ <attribute name="requests" type="int"></attribute>
+ <attribute name="heapSizeMB" type="int"></attribute>
+ <attribute name="maxHeapSizeMB" type="int"></attribute>
+ </complexType>
+
+ <complexType name="Region">
+ <attribute name="name" type="base64Binary"></attribute>
+ <attribute name="stores" type="int"></attribute>
+ <attribute name="storefiles" type="int"></attribute>
+ <attribute name="storefileSizeMB" type="int"></attribute>
+ <attribute name="memstoreSizeMB" type="int"></attribute>
+ <attribute name="storefileIndexSizeMB" type="int"></attribute>
+ </complexType>
+</schema>
+
+
+
+
+Protobufs Schema
+
+
+
+message Version {
+ optional string stargateVersion = 1;
+ optional string jvmVersion = 2;
+ optional string osVersion = 3;
+ optional string serverVersion = 4;
+ optional string jerseyVersion = 5;
+}
+
+message StorageClusterStatus {
+ message Region {
+ required bytes name = 1;
+ optional int32 stores = 2;
+ optional int32 storefiles = 3;
+ optional int32 storefileSizeMB = 4;
+ optional int32 memstoreSizeMB = 5;
+ optional int32 storefileIndexSizeMB = 6;
+ }
+ message Node {
+ required string name = 1; // name:port
+ optional int64 startCode = 2;
+ optional int32 requests = 3;
+ optional int32 heapSizeMB = 4;
+ optional int32 maxHeapSizeMB = 5;
+ repeated Region regions = 6;
+ }
+ // node status
+ repeated Node liveNodes = 1;
+ repeated string deadNodes = 2;
+ // summary statistics
+ optional int32 regions = 3;
+ optional int32 requests = 4;
+ optional double averageLoad = 5;
+}
+
+message TableList {
+ repeated string name = 1;
+}
+
+message TableInfo {
+ required string name = 1;
+ message Region {
+ required string name = 1;
+ optional bytes startKey = 2;
+ optional bytes endKey = 3;
+ optional int64 id = 4;
+ optional string location = 5;
+ }
+ repeated Region regions = 2;
+}
+
+message TableSchema {
+ optional string name = 1;
+ message Attribute {
+ required string name = 1;
+ required string value = 2;
+ }
+ repeated Attribute attrs = 2;
+ repeated ColumnSchema columns = 3;
+ // optional helpful encodings of commonly used attributes
+ optional bool inMemory = 4;
+ optional bool readOnly = 5;
+}
+
+message ColumnSchema {
+ optional string name = 1;
+ message Attribute {
+ required string name = 1;
+ required string value = 2;
+ }
+ repeated Attribute attrs = 2;
+ // optional helpful encodings of commonly used attributes
+ optional int32 ttl = 3;
+ optional int32 maxVersions = 4;
+ optional string compression = 5;
+}
+
+message Cell {
+ optional bytes row = 1; // unused if Cell is in a CellSet
+ optional bytes column = 2;
+ optional int64 timestamp = 3;
+ optional bytes data = 4;
+}
+
+message CellSet {
+ message Row {
+ required bytes key = 1;
+ repeated Cell values = 2;
+ }
+ repeated Row rows = 1;
+}
+
+message Scanner {
+ optional bytes startRow = 1;
+ optional bytes endRow = 2;
+ repeated bytes columns = 3;
+ optional int32 batch = 4;
+ optional int64 startTime = 5;
+ optional int64 endTime = 6;
+}
+
+
+
+
diff --git a/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/model/ModelSchema.xsd b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/model/ModelSchema.xsd
new file mode 100755
index 0000000..9ab85d5
--- /dev/null
+++ b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/model/ModelSchema.xsd
@@ -0,0 +1,144 @@
+<!-- ModelSchema.xsd body not recoverable from this extraction; it corresponds
+     to the XML Schema reproduced in the package.html documentation above. -->
\ No newline at end of file
diff --git a/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/CellMessage.proto b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/CellMessage.proto
new file mode 100644
index 0000000..c3e256d
--- /dev/null
+++ b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/CellMessage.proto
@@ -0,0 +1,26 @@
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message Cell {
+ optional bytes row = 1; // unused if Cell is in a CellSet
+ optional bytes column = 2;
+ optional int64 timestamp = 3;
+ optional bytes data = 4;
+}
diff --git a/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/CellSetMessage.proto b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/CellSetMessage.proto
new file mode 100644
index 0000000..aa45193
--- /dev/null
+++ b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/CellSetMessage.proto
@@ -0,0 +1,29 @@
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import "CellMessage.proto";
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message CellSet {
+ message Row {
+ required bytes key = 1;
+ repeated Cell values = 2;
+ }
+ repeated Row rows = 1;
+}
diff --git a/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ColumnSchemaMessage.proto b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ColumnSchemaMessage.proto
new file mode 100644
index 0000000..bc3a034
--- /dev/null
+++ b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ColumnSchemaMessage.proto
@@ -0,0 +1,32 @@
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message ColumnSchema {
+ optional string name = 1;
+ message Attribute {
+ required string name = 1;
+ required string value = 2;
+ }
+ repeated Attribute attrs = 2;
+ // optional helpful encodings of commonly used attributes
+ optional int32 ttl = 3;
+ optional int32 maxVersions = 4;
+ optional string compression = 5;
+}
diff --git a/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ScannerMessage.proto b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ScannerMessage.proto
new file mode 100644
index 0000000..07fa213
--- /dev/null
+++ b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/ScannerMessage.proto
@@ -0,0 +1,30 @@
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message Scanner {
+ optional bytes startRow = 1;
+ optional bytes endRow = 2;
+ repeated bytes columns = 3;
+ optional int32 batch = 4;
+ optional int64 startTime = 5;
+ optional int64 endTime = 6;
+ optional int32 maxVersions = 7;
+ optional string filter = 8;
+}
diff --git a/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/StorageClusterStatusMessage.proto b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/StorageClusterStatusMessage.proto
new file mode 100644
index 0000000..ca12dc7
--- /dev/null
+++ b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/StorageClusterStatusMessage.proto
@@ -0,0 +1,45 @@
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message StorageClusterStatus {
+ message Region {
+ required bytes name = 1;
+ optional int32 stores = 2;
+ optional int32 storefiles = 3;
+ optional int32 storefileSizeMB = 4;
+ optional int32 memstoreSizeMB = 5;
+ optional int32 storefileIndexSizeMB = 6;
+ }
+ message Node {
+ required string name = 1; // name:port
+ optional int64 startCode = 2;
+ optional int32 requests = 3;
+ optional int32 heapSizeMB = 4;
+ optional int32 maxHeapSizeMB = 5;
+ repeated Region regions = 6;
+ }
+ // node status
+ repeated Node liveNodes = 1;
+ repeated string deadNodes = 2;
+ // summary statistics
+ optional int32 regions = 3;
+ optional int32 requests = 4;
+ optional double averageLoad = 5;
+}
diff --git a/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableInfoMessage.proto b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableInfoMessage.proto
new file mode 100644
index 0000000..ecc35ec
--- /dev/null
+++ b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableInfoMessage.proto
@@ -0,0 +1,31 @@
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message TableInfo {
+ required string name = 1;
+ message Region {
+ required string name = 1;
+ optional bytes startKey = 2;
+ optional bytes endKey = 3;
+ optional int64 id = 4;
+ optional string location = 5;
+ }
+ repeated Region regions = 2;
+}
diff --git a/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableListMessage.proto b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableListMessage.proto
new file mode 100644
index 0000000..f6d00cc
--- /dev/null
+++ b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableListMessage.proto
@@ -0,0 +1,23 @@
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message TableList {
+ repeated string name = 1;
+}
diff --git a/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableSchemaMessage.proto b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableSchemaMessage.proto
new file mode 100644
index 0000000..96df380
--- /dev/null
+++ b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/TableSchemaMessage.proto
@@ -0,0 +1,34 @@
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import "ColumnSchemaMessage.proto";
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message TableSchema {
+ optional string name = 1;
+ message Attribute {
+ required string name = 1;
+ required string value = 2;
+ }
+ repeated Attribute attrs = 2;
+ repeated ColumnSchema columns = 3;
+ // optional helpful encodings of commonly used attributes
+ optional bool inMemory = 4;
+ optional bool readOnly = 5;
+}
diff --git a/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/VersionMessage.proto b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/VersionMessage.proto
new file mode 100644
index 0000000..046bc33
--- /dev/null
+++ b/contrib/stargate/core/src/main/resources/org/apache/hadoop/hbase/stargate/protobuf/VersionMessage.proto
@@ -0,0 +1,27 @@
+// Copyright 2010 The Apache Software Foundation
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package org.apache.hadoop.hbase.stargate.protobuf.generated;
+
+message Version {
+ optional string stargateVersion = 1;
+ optional string jvmVersion = 2;
+ optional string osVersion = 3;
+ optional string serverVersion = 4;
+ optional string jerseyVersion = 5;
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/MiniClusterTestBase.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/MiniClusterTestBase.java
new file mode 100644
index 0000000..939d247
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/MiniClusterTestBase.java
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.MiniZooKeeperCluster;
+import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.servlet.Context;
+import org.mortbay.jetty.servlet.ServletHolder;
+
+import com.sun.jersey.spi.container.servlet.ServletContainer;
+
+import junit.framework.TestCase;
+
+public class MiniClusterTestBase extends TestCase {
+ protected static final Log LOG =
+ LogFactory.getLog(MiniClusterTestBase.class);
+
+ public static final String MIMETYPE_BINARY = "application/octet-stream";
+ public static final String MIMETYPE_JSON = "application/json";
+ public static final String MIMETYPE_PLAIN = "text/plain";
+ public static final String MIMETYPE_PROTOBUF = "application/x-protobuf";
+ public static final String MIMETYPE_XML = "text/xml";
+
+ // use a nonstandard port
+ public static final int DEFAULT_TEST_PORT = 38080;
+
+ protected static Configuration conf = HBaseConfiguration.create();
+ protected static MiniZooKeeperCluster zooKeeperCluster;
+ protected static MiniHBaseCluster hbaseCluster;
+ protected static MiniDFSCluster dfsCluster;
+ protected static File testDir;
+ protected static int testServletPort;
+ protected static Server server;
+
+ public static boolean isMiniClusterRunning() {
+ return server != null;
+ }
+
+ private static void startDFS() throws Exception {
+ if (dfsCluster != null) {
+ LOG.error("MiniDFSCluster already running");
+ return;
+ }
+ // This spews a bunch of warnings about missing scheme. TODO: fix.
+ dfsCluster = new MiniDFSCluster(0, conf, 2, true, true, true,
+ null, null, null, null);
+ // mangle the conf so that the fs parameter points to the minidfs we
+ // just started up
+ FileSystem filesystem = dfsCluster.getFileSystem();
+ conf.set("fs.defaultFS", filesystem.getUri().toString());
+ Path parentdir = filesystem.getHomeDirectory();
+ conf.set(HConstants.HBASE_DIR, parentdir.toString());
+ filesystem.mkdirs(parentdir);
+ FSUtils.setVersion(filesystem, parentdir);
+ }
+
+ private static void stopDFS() {
+ if (dfsCluster != null) try {
+ FileSystem fs = dfsCluster.getFileSystem();
+ if (fs != null) {
+ LOG.info("Shutting down FileSystem");
+ fs.close();
+ }
+ FileSystem.closeAll();
+ dfsCluster = null;
+ } catch (Exception e) {
+ LOG.warn(StringUtils.stringifyException(e));
+ }
+ }
+
+ private static void startZooKeeper() throws Exception {
+ if (zooKeeperCluster != null) {
+ LOG.error("ZooKeeper already running");
+ return;
+ }
+ zooKeeperCluster = new MiniZooKeeperCluster();
+ zooKeeperCluster.startup(testDir);
+ LOG.info("started " + zooKeeperCluster.getClass().getName());
+ }
+
+ private static void stopZooKeeper() {
+ if (zooKeeperCluster != null) try {
+ zooKeeperCluster.shutdown();
+ zooKeeperCluster = null;
+ } catch (Exception e) {
+ LOG.warn(StringUtils.stringifyException(e));
+ }
+ }
+
+ private static void startHBase() throws Exception {
+ if (hbaseCluster != null) {
+ LOG.error("MiniHBaseCluster already running");
+ return;
+ }
+ hbaseCluster = new MiniHBaseCluster(conf, 1);
+ // opening the META table ensures that cluster is running
+ new HTable(conf, HConstants.META_TABLE_NAME);
+ LOG.info("started MiniHBaseCluster");
+ }
+
+ private static void stopHBase() {
+ if (hbaseCluster != null) try {
+ HConnectionManager.deleteConnectionInfo(conf, true);
+ hbaseCluster.shutdown();
+ hbaseCluster = null;
+ } catch (Exception e) {
+ LOG.warn(StringUtils.stringifyException(e));
+ }
+ }
+
+ private static void startServletContainer() throws Exception {
+ if (server != null) {
+ LOG.error("ServletContainer already running");
+ return;
+ }
+
+ // set up the Jersey servlet container for Jetty
+ ServletHolder sh = new ServletHolder(ServletContainer.class);
+ sh.setInitParameter(
+ "com.sun.jersey.config.property.resourceConfigClass",
+ ResourceConfig.class.getCanonicalName());
+ sh.setInitParameter("com.sun.jersey.config.property.packages",
+ "jetty");
+
+ LOG.info("configured " + ServletContainer.class.getName());
+
+ // set up Jetty and run the embedded server
+ testServletPort = conf.getInt("test.stargate.port", DEFAULT_TEST_PORT);
+ server = new Server(testServletPort);
+ server.setSendServerVersion(false);
+ server.setSendDateHeader(false);
+ // set up context
+ Context context = new Context(server, "/", Context.SESSIONS);
+ context.addServlet(sh, "/*");
+ // start the server
+ server.start();
+
+ LOG.info("started " + server.getClass().getName() + " on port " +
+ testServletPort);
+ }
+
+ private static void stopServletContainer() {
+ if (server != null) try {
+ server.stop();
+ server = null;
+ } catch (Exception e) {
+ LOG.warn(StringUtils.stringifyException(e));
+ }
+ }
+
+ public static void startMiniCluster() throws Exception {
+ try {
+ startDFS();
+ startZooKeeper();
+ startHBase();
+ startServletContainer();
+ } catch (Exception e) {
+ stopServletContainer();
+ stopHBase();
+ stopZooKeeper();
+ stopDFS();
+ throw e;
+ }
+ }
+
+ public static void stopMiniCluster() {
+ stopServletContainer();
+ stopHBase();
+ stopZooKeeper();
+ stopDFS();
+ }
+
+ static class MiniClusterShutdownThread extends Thread {
+ public void run() {
+ stopMiniCluster();
+ Path path = new Path(
+ conf.get("test.build.data",
+ System.getProperty("test.build.data", "build/test/data")));
+ try {
+ FileSystem.get(conf).delete(path, true);
+ } catch (IOException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ }
+ }
+ }
+
+ @Override
+ protected void setUp() throws Exception {
+ // start the mini cluster if it is not running yet
+ if (!isMiniClusterRunning()) {
+ startMiniCluster();
+ Runtime.getRuntime().addShutdownHook(new MiniClusterShutdownThread());
+ }
+
+ // tell HttpClient to dump request and response headers into the test
+ // log at DEBUG level
+ Logger.getLogger("httpclient.wire.header").setLevel(Level.DEBUG);
+
+ super.setUp();
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/Test00MiniCluster.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/Test00MiniCluster.java
new file mode 100644
index 0000000..9148798
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/Test00MiniCluster.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.HTable;
+
+public class Test00MiniCluster extends MiniClusterTestBase {
+ public void testDFSMiniCluster() {
+ assertNotNull(dfsCluster);
+ }
+
+ public void testZooKeeperMiniCluster() {
+ assertNotNull(zooKeeperCluster);
+ }
+
+ public void testHBaseMiniCluster() throws IOException {
+ assertNotNull(hbaseCluster);
+ assertNotNull(new HTable(conf, HConstants.META_TABLE_NAME));
+ }
+
+ public void testStargateServlet() throws IOException {
+ assertNotNull(server);
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestRowResource.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestRowResource.java
new file mode 100644
index 0000000..44be440
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestRowResource.java
@@ -0,0 +1,383 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.StringWriter;
+import java.net.URLEncoder;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Marshaller;
+import javax.xml.bind.Unmarshaller;
+
+import org.apache.commons.httpclient.Header;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.stargate.client.Client;
+import org.apache.hadoop.hbase.stargate.client.Cluster;
+import org.apache.hadoop.hbase.stargate.client.Response;
+import org.apache.hadoop.hbase.stargate.model.CellModel;
+import org.apache.hadoop.hbase.stargate.model.CellSetModel;
+import org.apache.hadoop.hbase.stargate.model.RowModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class TestRowResource extends MiniClusterTestBase {
+ private static final String TABLE = "TestRowResource";
+ private static final String COLUMN_1 = "a:";
+ private static final String COLUMN_2 = "b:";
+ private static final String ROW_1 = "testrow1";
+ private static final String VALUE_1 = "testvalue1";
+ private static final String ROW_2 = "testrow2";
+ private static final String VALUE_2 = "testvalue2";
+ private static final String ROW_3 = "testrow3";
+ private static final String VALUE_3 = "testvalue3";
+ private static final String ROW_4 = "testrow4";
+ private static final String VALUE_4 = "testvalue4";
+
+ private Client client;
+ private JAXBContext context;
+ private Marshaller marshaller;
+ private Unmarshaller unmarshaller;
+ private HBaseAdmin admin;
+
+ public TestRowResource() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(
+ CellModel.class,
+ CellSetModel.class,
+ RowModel.class);
+ marshaller = context.createMarshaller();
+ unmarshaller = context.createUnmarshaller();
+ }
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ client = new Client(new Cluster().add("localhost", testServletPort));
+ admin = new HBaseAdmin(conf);
+ if (admin.tableExists(TABLE)) {
+ return;
+ }
+ HTableDescriptor htd = new HTableDescriptor(TABLE);
+ htd.addFamily(new HColumnDescriptor(KeyValue.parseColumn(
+ Bytes.toBytes(COLUMN_1))[0]));
+ htd.addFamily(new HColumnDescriptor(KeyValue.parseColumn(
+ Bytes.toBytes(COLUMN_2))[0]));
+ admin.createTable(htd);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ client.shutdown();
+ super.tearDown();
+ }
+
+ private Response deleteRow(String table, String row) throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ Response response = client.delete(path.toString());
+ Thread.yield();
+ return response;
+ }
+
+ private Response deleteValue(String table, String row, String column)
+ throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append('/');
+ path.append(column);
+ Response response = client.delete(path.toString());
+ Thread.yield();
+ return response;
+ }
+
+ private Response getValueXML(String table, String row, String column)
+ throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append('/');
+ path.append(column);
+ Response response = client.get(path.toString(), MIMETYPE_XML);
+ return response;
+ }
+
+ private Response getValuePB(String table, String row, String column)
+ throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append('/');
+ path.append(column);
+ Response response = client.get(path.toString(), MIMETYPE_PROTOBUF);
+ return response;
+ }
+
+ private Response putValueXML(String table, String row, String column,
+ String value) throws IOException, JAXBException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append('/');
+ path.append(column);
+ RowModel rowModel = new RowModel(row);
+ rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value)));
+ CellSetModel cellSetModel = new CellSetModel();
+ cellSetModel.addRow(rowModel);
+ StringWriter writer = new StringWriter();
+ marshaller.marshal(cellSetModel, writer);
+ Response response = client.put(path.toString(), MIMETYPE_XML,
+ Bytes.toBytes(writer.toString()));
+ Thread.yield();
+ return response;
+ }
+
+ private void checkValueXML(String table, String row, String column,
+ String value) throws IOException, JAXBException {
+ Response response = getValueXML(table, row, column);
+ assertEquals(response.getCode(), 200);
+ CellSetModel cellSet = (CellSetModel)
+ unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+ RowModel rowModel = cellSet.getRows().get(0);
+ CellModel cell = rowModel.getCells().get(0);
+ assertEquals(Bytes.toString(cell.getColumn()), column);
+ assertEquals(Bytes.toString(cell.getValue()), value);
+ }
+
+ private Response putValuePB(String table, String row, String column,
+ String value) throws IOException {
+ StringBuilder path = new StringBuilder();
+ path.append('/');
+ path.append(table);
+ path.append('/');
+ path.append(row);
+ path.append('/');
+ path.append(column);
+ RowModel rowModel = new RowModel(row);
+ rowModel.addCell(new CellModel(Bytes.toBytes(column), Bytes.toBytes(value)));
+ CellSetModel cellSetModel = new CellSetModel();
+ cellSetModel.addRow(rowModel);
+ Response response = client.put(path.toString(), MIMETYPE_PROTOBUF,
+ cellSetModel.createProtobufOutput());
+ Thread.yield();
+ return response;
+ }
+
+ private void checkValuePB(String table, String row, String column,
+ String value) throws IOException {
+ Response response = getValuePB(table, row, column);
+ assertEquals(response.getCode(), 200);
+ CellSetModel cellSet = new CellSetModel();
+ cellSet.getObjectFromMessage(response.getBody());
+ RowModel rowModel = cellSet.getRows().get(0);
+ CellModel cell = rowModel.getCells().get(0);
+ assertEquals(Bytes.toString(cell.getColumn()), column);
+ assertEquals(Bytes.toString(cell.getValue()), value);
+ }
+
+ public void testDelete() throws IOException, JAXBException {
+ Response response;
+
+ response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(response.getCode(), 200);
+ response = putValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
+ assertEquals(response.getCode(), 200);
+ checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
+
+ response = deleteValue(TABLE, ROW_1, COLUMN_1);
+ assertEquals(response.getCode(), 200);
+ response = getValueXML(TABLE, ROW_1, COLUMN_1);
+ assertEquals(response.getCode(), 404);
+ checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
+
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(response.getCode(), 200);
+ response = getValueXML(TABLE, ROW_1, COLUMN_1);
+ assertEquals(response.getCode(), 404);
+ response = getValueXML(TABLE, ROW_1, COLUMN_2);
+ assertEquals(response.getCode(), 404);
+ }
+
+ public void testSingleCellGetPutXML() throws IOException, JAXBException {
+ Response response = getValueXML(TABLE, ROW_1, COLUMN_1);
+ assertEquals(response.getCode(), 404);
+
+ response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(response.getCode(), 200);
+ checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2);
+ assertEquals(response.getCode(), 200);
+ checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2);
+
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(response.getCode(), 200);
+ }
+
+ public void testSingleCellGetPutPB() throws IOException, JAXBException {
+ Response response = getValuePB(TABLE, ROW_1, COLUMN_1);
+ assertEquals(response.getCode(), 404);
+
+ response = putValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(response.getCode(), 200);
+ checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+
+ response = putValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ assertEquals(response.getCode(), 200);
+ checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ response = putValueXML(TABLE, ROW_1, COLUMN_1, VALUE_2);
+ assertEquals(response.getCode(), 200);
+ checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_2);
+
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(response.getCode(), 200);
+ }
+
+ public void testSingleCellGetPutBinary() throws IOException {
+ final String path = "/" + TABLE + "/" + ROW_3 + "/" + COLUMN_1;
+ final byte[] body = Bytes.toBytes(VALUE_3);
+ Response response = client.put(path, MIMETYPE_BINARY, body);
+ assertEquals(response.getCode(), 200);
+ Thread.yield();
+
+ response = client.get(path, MIMETYPE_BINARY);
+ assertEquals(response.getCode(), 200);
+ assertTrue(Bytes.equals(response.getBody(), body));
+ boolean foundTimestampHeader = false;
+ for (Header header: response.getHeaders()) {
+ if (header.getName().equals("X-Timestamp")) {
+ foundTimestampHeader = true;
+ break;
+ }
+ }
+ assertTrue(foundTimestampHeader);
+
+ response = deleteRow(TABLE, ROW_3);
+ assertEquals(response.getCode(), 200);
+ }
+
+ public void testSingleCellGetJSON() throws IOException, JAXBException {
+ final String path = "/" + TABLE + "/" + ROW_4 + "/" + COLUMN_1;
+ Response response = client.put(path, MIMETYPE_BINARY,
+ Bytes.toBytes(VALUE_4));
+ assertEquals(response.getCode(), 200);
+ Thread.yield();
+ response = client.get(path, MIMETYPE_JSON);
+ assertEquals(response.getCode(), 200);
+ response = deleteRow(TABLE, ROW_4);
+ assertEquals(response.getCode(), 200);
+ }
+
+ public void testURLEncodedKey() throws IOException, JAXBException {
+ String encodedKey = URLEncoder.encode("http://www.google.com/",
+ HConstants.UTF8_ENCODING);
+ Response response;
+ response = putValueXML(TABLE, encodedKey, COLUMN_1, VALUE_1);
+ assertEquals(response.getCode(), 200);
+ response = putValuePB(TABLE, encodedKey, COLUMN_2, VALUE_2);
+ assertEquals(response.getCode(), 200);
+ checkValuePB(TABLE, encodedKey, COLUMN_1, VALUE_1);
+ checkValueXML(TABLE, encodedKey, COLUMN_2, VALUE_2);
+ }
+
+ public void testMultiCellGetPutXML() throws IOException, JAXBException {
+ String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
+
+ CellSetModel cellSetModel = new CellSetModel();
+ RowModel rowModel = new RowModel(ROW_1);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2)));
+ cellSetModel.addRow(rowModel);
+ rowModel = new RowModel(ROW_2);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4)));
+ cellSetModel.addRow(rowModel);
+ StringWriter writer = new StringWriter();
+ marshaller.marshal(cellSetModel, writer);
+ Response response = client.put(path, MIMETYPE_XML,
+ Bytes.toBytes(writer.toString()));
+ Thread.yield();
+
+ // make sure the fake row was not actually created
+ response = client.get(path, MIMETYPE_XML);
+ assertEquals(response.getCode(), 404);
+
+ // check that all of the values were created
+ checkValueXML(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ checkValueXML(TABLE, ROW_1, COLUMN_2, VALUE_2);
+ checkValueXML(TABLE, ROW_2, COLUMN_1, VALUE_3);
+ checkValueXML(TABLE, ROW_2, COLUMN_2, VALUE_4);
+
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(response.getCode(), 200);
+ response = deleteRow(TABLE, ROW_2);
+ assertEquals(response.getCode(), 200);
+ }
+
+ public void testMultiCellGetPutPB() throws IOException {
+ String path = "/" + TABLE + "/fakerow"; // deliberate nonexistent row
+
+ CellSetModel cellSetModel = new CellSetModel();
+ RowModel rowModel = new RowModel(ROW_1);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_1)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_2)));
+ cellSetModel.addRow(rowModel);
+ rowModel = new RowModel(ROW_2);
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_1), Bytes.toBytes(VALUE_3)));
+ rowModel.addCell(new CellModel(Bytes.toBytes(COLUMN_2), Bytes.toBytes(VALUE_4)));
+ cellSetModel.addRow(rowModel);
+ Response response = client.put(path, MIMETYPE_PROTOBUF,
+ cellSetModel.createProtobufOutput());
+ Thread.yield();
+
+ // make sure the fake row was not actually created
+ response = client.get(path, MIMETYPE_PROTOBUF);
+ assertEquals(response.getCode(), 404);
+
+ // check that all of the values were created
+ checkValuePB(TABLE, ROW_1, COLUMN_1, VALUE_1);
+ checkValuePB(TABLE, ROW_1, COLUMN_2, VALUE_2);
+ checkValuePB(TABLE, ROW_2, COLUMN_1, VALUE_3);
+ checkValuePB(TABLE, ROW_2, COLUMN_2, VALUE_4);
+
+ response = deleteRow(TABLE, ROW_1);
+ assertEquals(response.getCode(), 200);
+ response = deleteRow(TABLE, ROW_2);
+ assertEquals(response.getCode(), 200);
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestScannerResource.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestScannerResource.java
new file mode 100644
index 0000000..207a64d
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestScannerResource.java
@@ -0,0 +1,275 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.StringWriter;
+import java.util.Iterator;
+import java.util.Random;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Marshaller;
+import javax.xml.bind.Unmarshaller;
+
+import org.apache.commons.httpclient.Header;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.stargate.client.Client;
+import org.apache.hadoop.hbase.stargate.client.Cluster;
+import org.apache.hadoop.hbase.stargate.client.Response;
+import org.apache.hadoop.hbase.stargate.model.CellModel;
+import org.apache.hadoop.hbase.stargate.model.CellSetModel;
+import org.apache.hadoop.hbase.stargate.model.RowModel;
+import org.apache.hadoop.hbase.stargate.model.ScannerModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class TestScannerResource extends MiniClusterTestBase {
+ private static final String TABLE = "TestScannerResource";
+ private static final String COLUMN_1 = "a:";
+ private static final String COLUMN_2 = "b:";
+
+ private static int expectedRows1;
+ private static int expectedRows2;
+
+ private Client client;
+ private JAXBContext context;
+ private Marshaller marshaller;
+ private Unmarshaller unmarshaller;
+ private HBaseAdmin admin;
+
+ private int insertData(String tableName, String column, double prob)
+ throws IOException {
+ Random rng = new Random();
+ int count = 0;
+ HTable table = new HTable(conf, tableName);
+ byte[] k = new byte[3];
+ byte [][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
+ for (byte b1 = 'a'; b1 < 'z'; b1++) {
+ for (byte b2 = 'a'; b2 < 'z'; b2++) {
+ for (byte b3 = 'a'; b3 < 'z'; b3++) {
+ if (rng.nextDouble() < prob) {
+ k[0] = b1;
+ k[1] = b2;
+ k[2] = b3;
+ Put put = new Put(k);
+ if(famAndQf.length == 1) {
+ put.add(famAndQf[0], new byte[0], k);
+ } else {
+ put.add(famAndQf[0], famAndQf[1], k);
+ }
+ table.put(put);
+ count++;
+ }
+ }
+ }
+ }
+ table.flushCommits();
+ return count;
+ }
+
+ public TestScannerResource() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(
+ CellModel.class,
+ CellSetModel.class,
+ RowModel.class,
+ ScannerModel.class);
+ marshaller = context.createMarshaller();
+ unmarshaller = context.createUnmarshaller();
+ }
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ client = new Client(new Cluster().add("localhost", testServletPort));
+ admin = new HBaseAdmin(conf);
+ if (admin.tableExists(TABLE)) {
+ return;
+ }
+ HTableDescriptor htd = new HTableDescriptor(TABLE);
+ htd.addFamily(new HColumnDescriptor(KeyValue.parseColumn(
+ Bytes.toBytes(COLUMN_1))[0]));
+ htd.addFamily(new HColumnDescriptor(KeyValue.parseColumn(
+ Bytes.toBytes(COLUMN_2))[0]));
+ admin.createTable(htd);
+ expectedRows1 = insertData(TABLE, COLUMN_1, 1.0);
+ expectedRows2 = insertData(TABLE, COLUMN_2, 0.5);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ client.shutdown();
+ super.tearDown();
+ }
+
+ private int countCellSet(CellSetModel model) {
+ int count = 0;
+ Iterator<RowModel> rows = model.getRows().iterator();
+ while (rows.hasNext()) {
+ RowModel row = rows.next();
+ Iterator<CellModel> cells = row.getCells().iterator();
+ while (cells.hasNext()) {
+ cells.next();
+ count++;
+ }
+ }
+ return count;
+ }
+
+ public void testSimpleScannerXML() throws IOException, JAXBException {
+ final int BATCH_SIZE = 5;
+ // new scanner
+ ScannerModel model = new ScannerModel();
+ model.setBatch(BATCH_SIZE);
+ model.addColumn(Bytes.toBytes(COLUMN_1));
+ StringWriter writer = new StringWriter();
+ marshaller.marshal(model, writer);
+ byte[] body = Bytes.toBytes(writer.toString());
+ Response response = client.put("/" + TABLE + "/scanner", MIMETYPE_XML,
+ body);
+ assertEquals(response.getCode(), 201);
+ String scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+
+ // get a cell set
+ response = client.get(scannerURI, MIMETYPE_XML);
+ assertEquals(response.getCode(), 200);
+ CellSetModel cellSet = (CellSetModel)
+ unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+ // confirm batch size conformance
+ assertEquals(countCellSet(cellSet), BATCH_SIZE);
+
+ // delete the scanner
+ response = client.delete(scannerURI);
+ assertEquals(response.getCode(), 200);
+ }
+
+ public void testSimpleScannerPB() throws IOException {
+ final int BATCH_SIZE = 10;
+ // new scanner
+ ScannerModel model = new ScannerModel();
+ model.setBatch(BATCH_SIZE);
+ model.addColumn(Bytes.toBytes(COLUMN_1));
+ Response response = client.put("/" + TABLE + "/scanner",
+ MIMETYPE_PROTOBUF, model.createProtobufOutput());
+ assertEquals(response.getCode(), 201);
+ String scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+
+ // get a cell set
+ response = client.get(scannerURI, MIMETYPE_PROTOBUF);
+ assertEquals(response.getCode(), 200);
+ CellSetModel cellSet = new CellSetModel();
+ cellSet.getObjectFromMessage(response.getBody());
+ // confirm batch size conformance
+ assertEquals(countCellSet(cellSet), BATCH_SIZE);
+
+ // delete the scanner
+ response = client.delete(scannerURI);
+ assertEquals(response.getCode(), 200);
+ }
+
+ public void testSimpleScannerBinary() throws IOException {
+ // new scanner
+ ScannerModel model = new ScannerModel();
+ model.setBatch(1);
+ model.addColumn(Bytes.toBytes(COLUMN_1));
+ Response response = client.put("/" + TABLE + "/scanner",
+ MIMETYPE_PROTOBUF, model.createProtobufOutput());
+ assertEquals(response.getCode(), 201);
+ String scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+
+ // get a cell
+ response = client.get(scannerURI, MIMETYPE_BINARY);
+ assertEquals(response.getCode(), 200);
+ // verify that data was returned
+ assertTrue(response.getBody().length > 0);
+ // verify that the expected X-headers are present
+ boolean foundRowHeader = false, foundColumnHeader = false,
+ foundTimestampHeader = false;
+ for (Header header: response.getHeaders()) {
+ if (header.getName().equals("X-Row")) {
+ foundRowHeader = true;
+ } else if (header.getName().equals("X-Column")) {
+ foundColumnHeader = true;
+ } else if (header.getName().equals("X-Timestamp")) {
+ foundTimestampHeader = true;
+ }
+ }
+ assertTrue(foundRowHeader);
+ assertTrue(foundColumnHeader);
+ assertTrue(foundTimestampHeader);
+
+ // delete the scanner
+ response = client.delete(scannerURI);
+ assertEquals(response.getCode(), 200);
+ }
+
+ private int fullTableScan(ScannerModel model) throws IOException {
+ model.setBatch(100);
+ Response response = client.put("/" + TABLE + "/scanner",
+ MIMETYPE_PROTOBUF, model.createProtobufOutput());
+ assertEquals(response.getCode(), 201);
+ String scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+ int count = 0;
+ while (true) {
+ response = client.get(scannerURI, MIMETYPE_PROTOBUF);
+ assertTrue(response.getCode() == 200 || response.getCode() == 204);
+ if (response.getCode() == 200) {
+ CellSetModel cellSet = new CellSetModel();
+ cellSet.getObjectFromMessage(response.getBody());
+ Iterator<RowModel> rows = cellSet.getRows().iterator();
+ while (rows.hasNext()) {
+ RowModel row = rows.next();
+ Iterator<CellModel> cells = row.getCells().iterator();
+ while (cells.hasNext()) {
+ cells.next();
+ count++;
+ }
+ }
+ } else {
+ break;
+ }
+ }
+ // delete the scanner
+ response = client.delete(scannerURI);
+ assertEquals(response.getCode(), 200);
+ return count;
+ }
+
+ public void testFullTableScan() throws IOException {
+ ScannerModel model = new ScannerModel();
+ model.addColumn(Bytes.toBytes(COLUMN_1));
+ assertEquals(fullTableScan(model), expectedRows1);
+
+ model = new ScannerModel();
+ model.addColumn(Bytes.toBytes(COLUMN_2));
+ assertEquals(fullTableScan(model), expectedRows2);
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestScannersWithFilters.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestScannersWithFilters.java
new file mode 100644
index 0000000..e31a98d
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestScannersWithFilters.java
@@ -0,0 +1,979 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.ByteArrayInputStream;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Marshaller;
+import javax.xml.bind.Unmarshaller;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.hbase.filter.RegexStringComparator;
+import org.apache.hadoop.hbase.filter.RowFilter;
+import org.apache.hadoop.hbase.filter.SkipFilter;
+import org.apache.hadoop.hbase.filter.SubstringComparator;
+import org.apache.hadoop.hbase.filter.ValueFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.stargate.client.Client;
+import org.apache.hadoop.hbase.stargate.client.Cluster;
+import org.apache.hadoop.hbase.stargate.client.Response;
+import org.apache.hadoop.hbase.stargate.model.CellModel;
+import org.apache.hadoop.hbase.stargate.model.CellSetModel;
+import org.apache.hadoop.hbase.stargate.model.RowModel;
+import org.apache.hadoop.hbase.stargate.model.ScannerModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class TestScannersWithFilters extends MiniClusterTestBase {
+
+ private static final Log LOG =
+ LogFactory.getLog(TestScannersWithFilters.class);
+
+ private Client client;
+ private JAXBContext context;
+ private Marshaller marshaller;
+ private Unmarshaller unmarshaller;
+
+ private static final byte [][] ROWS_ONE = {
+ Bytes.toBytes("testRowOne-0"), Bytes.toBytes("testRowOne-1"),
+ Bytes.toBytes("testRowOne-2"), Bytes.toBytes("testRowOne-3")
+ };
+
+ private static final byte [][] ROWS_TWO = {
+ Bytes.toBytes("testRowTwo-0"), Bytes.toBytes("testRowTwo-1"),
+ Bytes.toBytes("testRowTwo-2"), Bytes.toBytes("testRowTwo-3")
+ };
+
+ private static final byte [][] FAMILIES = {
+ Bytes.toBytes("testFamilyOne"), Bytes.toBytes("testFamilyTwo")
+ };
+
+ private static final byte [][] QUALIFIERS_ONE = {
+ Bytes.toBytes("testQualifierOne-0"), Bytes.toBytes("testQualifierOne-1"),
+ Bytes.toBytes("testQualifierOne-2"), Bytes.toBytes("testQualifierOne-3")
+ };
+
+ private static final byte [][] QUALIFIERS_TWO = {
+ Bytes.toBytes("testQualifierTwo-0"), Bytes.toBytes("testQualifierTwo-1"),
+ Bytes.toBytes("testQualifierTwo-2"), Bytes.toBytes("testQualifierTwo-3")
+ };
+
+ private static final byte [][] VALUES = {
+ Bytes.toBytes("testValueOne"), Bytes.toBytes("testValueTwo")
+ };
+
+ private long numRows = ROWS_ONE.length + ROWS_TWO.length;
+ private long colsPerRow = FAMILIES.length * QUALIFIERS_ONE.length;
+
+ public TestScannersWithFilters() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(
+ CellModel.class,
+ CellSetModel.class,
+ RowModel.class,
+ ScannerModel.class);
+ marshaller = context.createMarshaller();
+ unmarshaller = context.createUnmarshaller();
+ }
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ client = new Client(new Cluster().add("localhost", testServletPort));
+ HBaseAdmin admin = new HBaseAdmin(conf);
+ if (!admin.tableExists(getName())) {
+ HTableDescriptor htd = new HTableDescriptor(getName());
+ htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
+ htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
+ admin.createTable(htd);
+ HTable table = new HTable(conf, getName());
+ // Insert first half
+ for(byte [] ROW : ROWS_ONE) {
+ Put p = new Put(ROW);
+ for(byte [] QUALIFIER : QUALIFIERS_ONE) {
+ p.add(FAMILIES[0], QUALIFIER, VALUES[0]);
+ }
+ table.put(p);
+ }
+ for(byte [] ROW : ROWS_TWO) {
+ Put p = new Put(ROW);
+ for(byte [] QUALIFIER : QUALIFIERS_TWO) {
+ p.add(FAMILIES[1], QUALIFIER, VALUES[1]);
+ }
+ table.put(p);
+ }
+
+ // Insert second half (reverse families)
+ for(byte [] ROW : ROWS_ONE) {
+ Put p = new Put(ROW);
+ for(byte [] QUALIFIER : QUALIFIERS_ONE) {
+ p.add(FAMILIES[1], QUALIFIER, VALUES[0]);
+ }
+ table.put(p);
+ }
+ for(byte [] ROW : ROWS_TWO) {
+ Put p = new Put(ROW);
+ for(byte [] QUALIFIER : QUALIFIERS_TWO) {
+ p.add(FAMILIES[0], QUALIFIER, VALUES[1]);
+ }
+ table.put(p);
+ }
+
+ // Delete the second qualifier from all rows and families
+ for(byte [] ROW : ROWS_ONE) {
+ Delete d = new Delete(ROW);
+ d.deleteColumns(FAMILIES[0], QUALIFIERS_ONE[1]);
+ d.deleteColumns(FAMILIES[1], QUALIFIERS_ONE[1]);
+ table.delete(d);
+ }
+ for(byte [] ROW : ROWS_TWO) {
+ Delete d = new Delete(ROW);
+ d.deleteColumns(FAMILIES[0], QUALIFIERS_TWO[1]);
+ d.deleteColumns(FAMILIES[1], QUALIFIERS_TWO[1]);
+ table.delete(d);
+ }
+ colsPerRow -= 2;
+
+ // Delete the second rows from both groups, one column at a time
+ for(byte [] QUALIFIER : QUALIFIERS_ONE) {
+ Delete d = new Delete(ROWS_ONE[1]);
+ d.deleteColumns(FAMILIES[0], QUALIFIER);
+ d.deleteColumns(FAMILIES[1], QUALIFIER);
+ table.delete(d);
+ }
+ for(byte [] QUALIFIER : QUALIFIERS_TWO) {
+ Delete d = new Delete(ROWS_TWO[1]);
+ d.deleteColumns(FAMILIES[0], QUALIFIER);
+ d.deleteColumns(FAMILIES[1], QUALIFIER);
+ table.delete(d);
+ }
+ numRows -= 2;
+ }
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ client.shutdown();
+ super.tearDown();
+ }
+
+ private void verifyScan(Scan s, long expectedRows, long expectedKeys)
+ throws Exception {
+ ScannerModel model = ScannerModel.fromScan(s);
+ model.setBatch(Integer.MAX_VALUE); // fetch it all at once
+ StringWriter writer = new StringWriter();
+ marshaller.marshal(model, writer);
+ LOG.debug(writer.toString());
+ byte[] body = Bytes.toBytes(writer.toString());
+ Response response = client.put("/" + getName() + "/scanner", MIMETYPE_XML,
+ body);
+ assertEquals(response.getCode(), 201);
+ String scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+
+ // get a cell set
+ response = client.get(scannerURI, MIMETYPE_XML);
+ assertEquals(response.getCode(), 200);
+ CellSetModel cells = (CellSetModel)
+ unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+
+ int rows = cells.getRows().size();
+ assertTrue("Scanned too many rows! Only expected " + expectedRows +
+ " total but scanned " + rows, expectedRows == rows);
+ for (RowModel row: cells.getRows()) {
+ int count = row.getCells().size();
+ assertEquals("Expected " + expectedKeys + " keys per row but " +
+ "returned " + count, expectedKeys, count);
+ }
+
+ // delete the scanner
+ response = client.delete(scannerURI);
+ assertEquals(response.getCode(), 200);
+ }
+
+ private void verifyScanFull(Scan s, KeyValue [] kvs) throws Exception {
+ ScannerModel model = ScannerModel.fromScan(s);
+ model.setBatch(Integer.MAX_VALUE); // fetch it all at once
+ StringWriter writer = new StringWriter();
+ marshaller.marshal(model, writer);
+ LOG.debug(writer.toString());
+ byte[] body = Bytes.toBytes(writer.toString());
+ Response response = client.put("/" + getName() + "/scanner", MIMETYPE_XML,
+ body);
+ assertEquals(response.getCode(), 201);
+ String scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+
+ // get a cell set
+ response = client.get(scannerURI, MIMETYPE_XML);
+ assertEquals(response.getCode(), 200);
+ CellSetModel cellSet = (CellSetModel)
+ unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+
+ // delete the scanner
+ response = client.delete(scannerURI);
+ assertEquals(response.getCode(), 200);
+
+ int row = 0;
+ int idx = 0;
+ Iterator<RowModel> i = cellSet.getRows().iterator();
+ for (boolean done = true; done; row++) {
+ done = i.hasNext();
+ if (!done) break;
+ RowModel rowModel = i.next();
+ List<CellModel> cells = rowModel.getCells();
+ if (cells.isEmpty()) break;
+ assertTrue("Scanned too many keys! Only expected " + kvs.length +
+ " total but already scanned " + (cells.size() + idx),
+ kvs.length >= idx + cells.size());
+ for (CellModel cell: cells) {
+ assertTrue("Row mismatch",
+ Bytes.equals(rowModel.getKey(), kvs[idx].getRow()));
+ byte[][] split = KeyValue.parseColumn(cell.getColumn());
+ assertTrue("Family mismatch",
+ Bytes.equals(split[0], kvs[idx].getFamily()));
+ assertTrue("Qualifier mismatch",
+ Bytes.equals(split[1], kvs[idx].getQualifier()));
+ assertTrue("Value mismatch",
+ Bytes.equals(cell.getValue(), kvs[idx].getValue()));
+ idx++;
+ }
+ }
+ assertEquals("Expected " + kvs.length + " total keys but scanned " + idx,
+ kvs.length, idx);
+ }
+
+ private void verifyScanNoEarlyOut(Scan s, long expectedRows, long expectedKeys)
+ throws Exception {
+ ScannerModel model = ScannerModel.fromScan(s);
+ model.setBatch(Integer.MAX_VALUE); // fetch it all at once
+ StringWriter writer = new StringWriter();
+ marshaller.marshal(model, writer);
+ LOG.debug(writer.toString());
+ byte[] body = Bytes.toBytes(writer.toString());
+ Response response = client.put("/" + getName() + "/scanner", MIMETYPE_XML,
+ body);
+ assertEquals(response.getCode(), 201);
+ String scannerURI = response.getLocation();
+ assertNotNull(scannerURI);
+
+ // get a cell set
+ response = client.get(scannerURI, MIMETYPE_XML);
+ assertEquals(response.getCode(), 200);
+ CellSetModel cellSet = (CellSetModel)
+ unmarshaller.unmarshal(new ByteArrayInputStream(response.getBody()));
+
+ // delete the scanner
+ response = client.delete(scannerURI);
+ assertEquals(response.getCode(), 200);
+
+ Iterator<RowModel> i = cellSet.getRows().iterator();
+ int j = 0;
+ for (boolean done = true; done; j++) {
+ done = i.hasNext();
+ if (!done) break;
+ RowModel rowModel = i.next();
+ List<CellModel> cells = rowModel.getCells();
+ if (cells.isEmpty()) break;
+ assertTrue("Scanned too many rows! Only expected " + expectedRows +
+ " total but already scanned " + (j+1), expectedRows > j);
+ assertEquals("Expected " + expectedKeys + " keys per row but " +
+ "returned " + cells.size(), expectedKeys, cells.size());
+ }
+ assertEquals("Expected " + expectedRows + " rows but scanned " + j +
+ " rows", expectedRows, j);
+ }
+
+ public void testNoFilter() throws Exception {
+ // No filter
+ long expectedRows = this.numRows;
+ long expectedKeys = this.colsPerRow;
+
+ // Both families
+ Scan s = new Scan();
+ verifyScan(s, expectedRows, expectedKeys);
+
+ // One family
+ s = new Scan();
+ s.addFamily(FAMILIES[0]);
+ verifyScan(s, expectedRows, expectedKeys/2);
+ }
+
+ public void testPrefixFilter() throws Exception {
+ // Grab rows from group one (half of total)
+ long expectedRows = this.numRows / 2;
+ long expectedKeys = this.colsPerRow;
+ Scan s = new Scan();
+ s.setFilter(new PrefixFilter(Bytes.toBytes("testRowOne")));
+ verifyScan(s, expectedRows, expectedKeys);
+ }
+
+ public void testPageFilter() throws Exception {
+
+ // KVs in first 6 rows
+ KeyValue [] expectedKVs = {
+ // testRowOne-0
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowOne-2
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowOne-3
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowTwo-0
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-2
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-3
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1])
+ };
+
+ // Grab all 6 rows
+ long expectedRows = 6;
+ long expectedKeys = this.colsPerRow;
+ Scan s = new Scan();
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScan(s, expectedRows, expectedKeys);
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScanFull(s, expectedKVs);
+
+ // Grab first 4 rows (6 cols per row)
+ expectedRows = 4;
+ expectedKeys = this.colsPerRow;
+ s = new Scan();
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScan(s, expectedRows, expectedKeys);
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScanFull(s, Arrays.copyOf(expectedKVs, 24));
+
+ // Grab first 2 rows
+ expectedRows = 2;
+ expectedKeys = this.colsPerRow;
+ s = new Scan();
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScan(s, expectedRows, expectedKeys);
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScanFull(s, Arrays.copyOf(expectedKVs, 12));
+
+ // Grab first row
+ expectedRows = 1;
+ expectedKeys = this.colsPerRow;
+ s = new Scan();
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScan(s, expectedRows, expectedKeys);
+ s.setFilter(new PageFilter(expectedRows));
+ verifyScanFull(s, Arrays.copyOf(expectedKVs, 6));
+ }
+
+ public void testInclusiveStopFilter() throws Exception {
+
+ // Grab rows from group one
+
+ // If we just use start/stop row, we get total/2 - 1 rows
+ long expectedRows = (this.numRows / 2) - 1;
+ long expectedKeys = this.colsPerRow;
+ Scan s = new Scan(Bytes.toBytes("testRowOne-0"),
+ Bytes.toBytes("testRowOne-3"));
+ verifyScan(s, expectedRows, expectedKeys);
+
+ // Now use start row with inclusive stop filter
+ expectedRows = this.numRows / 2;
+ s = new Scan(Bytes.toBytes("testRowOne-0"));
+ s.setFilter(new InclusiveStopFilter(Bytes.toBytes("testRowOne-3")));
+ verifyScan(s, expectedRows, expectedKeys);
+
+ // Grab rows from group two
+
+ // If we just use start/stop row, we get total/2 - 1 rows
+ expectedRows = (this.numRows / 2) - 1;
+ expectedKeys = this.colsPerRow;
+ s = new Scan(Bytes.toBytes("testRowTwo-0"),
+ Bytes.toBytes("testRowTwo-3"));
+ verifyScan(s, expectedRows, expectedKeys);
+
+ // Now use start row with inclusive stop filter
+ expectedRows = this.numRows / 2;
+ s = new Scan(Bytes.toBytes("testRowTwo-0"));
+ s.setFilter(new InclusiveStopFilter(Bytes.toBytes("testRowTwo-3")));
+ verifyScan(s, expectedRows, expectedKeys);
+
+ }
+
+ public void testQualifierFilter() throws Exception {
+
+ // Match two keys (one from each family) in half the rows
+ long expectedRows = this.numRows / 2;
+ long expectedKeys = 2;
+ Filter f = new QualifierFilter(CompareOp.EQUAL,
+ new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
+ Scan s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys less than same qualifier
+ // Expect only two keys (one from each family) in half the rows
+ expectedRows = this.numRows / 2;
+ expectedKeys = 2;
+ f = new QualifierFilter(CompareOp.LESS,
+ new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys less than or equal
+ // Expect four keys (two from each family) in half the rows
+ expectedRows = this.numRows / 2;
+ expectedKeys = 4;
+ f = new QualifierFilter(CompareOp.LESS_OR_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys not equal
+ // Expect four keys (two from each family)
+ // Only look in first group of rows
+ expectedRows = this.numRows / 2;
+ expectedKeys = 4;
+ f = new QualifierFilter(CompareOp.NOT_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
+ s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo"));
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys greater or equal
+ // Expect four keys (two from each family)
+ // Only look in first group of rows
+ expectedRows = this.numRows / 2;
+ expectedKeys = 4;
+ f = new QualifierFilter(CompareOp.GREATER_OR_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
+ s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo"));
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys greater
+ // Expect two keys (one from each family)
+ // Only look in first group of rows
+ expectedRows = this.numRows / 2;
+ expectedKeys = 2;
+ f = new QualifierFilter(CompareOp.GREATER,
+ new BinaryComparator(Bytes.toBytes("testQualifierOne-2")));
+ s = new Scan(HConstants.EMPTY_START_ROW, Bytes.toBytes("testRowTwo"));
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys not equal to
+ // Look across rows and fully validate the keys and ordering
+ // Expect varied numbers of keys, 4 per row in group one, 6 per row in group two
+ f = new QualifierFilter(CompareOp.NOT_EQUAL,
+ new BinaryComparator(QUALIFIERS_ONE[2]));
+ s = new Scan();
+ s.setFilter(f);
+
+ KeyValue [] kvs = {
+ // testRowOne-0
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowOne-2
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowOne-3
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowTwo-0
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-2
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-3
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ };
+ verifyScanFull(s, kvs);
+
+
+ // Test across rows and groups with a regex
+ // Filter out "test*-2"
+ // Expect 4 keys per row across both groups
+ f = new QualifierFilter(CompareOp.NOT_EQUAL,
+ new RegexStringComparator("test.+-2"));
+ s = new Scan();
+ s.setFilter(f);
+
+ kvs = new KeyValue [] {
+ // testRowOne-0
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowOne-2
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowOne-3
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowTwo-0
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-2
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-3
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ };
+ verifyScanFull(s, kvs);
+
+ }
+
+ public void testRowFilter() throws Exception {
+
+ // Match a single row, all keys
+ long expectedRows = 1;
+ long expectedKeys = this.colsPerRow;
+ Filter f = new RowFilter(CompareOp.EQUAL,
+ new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+ Scan s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match two rows, one from each group, using a regex
+ expectedRows = 2;
+ expectedKeys = this.colsPerRow;
+ f = new RowFilter(CompareOp.EQUAL,
+ new RegexStringComparator("testRow.+-2"));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match rows less than
+ // Expect all keys in one row
+ expectedRows = 1;
+ expectedKeys = this.colsPerRow;
+ f = new RowFilter(CompareOp.LESS,
+ new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match rows less than or equal
+ // Expect all keys in two rows
+ expectedRows = 2;
+ expectedKeys = this.colsPerRow;
+ f = new RowFilter(CompareOp.LESS_OR_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match rows not equal
+ // Expect all keys in all but one row
+ expectedRows = this.numRows - 1;
+ expectedKeys = this.colsPerRow;
+ f = new RowFilter(CompareOp.NOT_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys greater or equal
+ // Expect all keys in all but one row
+ expectedRows = this.numRows - 1;
+ expectedKeys = this.colsPerRow;
+ f = new RowFilter(CompareOp.GREATER_OR_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match keys greater
+ // Expect all keys in all but two rows
+ expectedRows = this.numRows - 2;
+ expectedKeys = this.colsPerRow;
+ f = new RowFilter(CompareOp.GREATER,
+ new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match rows not equal to testRowOne-2
+ // Look across rows and fully validate the keys and ordering
+ // Should see all keys in all rows except testRowOne-2
+ f = new RowFilter(CompareOp.NOT_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+ s = new Scan();
+ s.setFilter(f);
+
+ KeyValue [] kvs = {
+ // testRowOne-0
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[0], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowOne-3
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowTwo-0
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-2
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-3
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ };
+ verifyScanFull(s, kvs);
+
+
+ // Test across rows and groups with a regex
+ // Keep only the rows whose key matches the regex ".+-2"
+ // Expect all keys in two rows
+ f = new RowFilter(CompareOp.EQUAL,
+ new RegexStringComparator(".+-2"));
+ s = new Scan();
+ s.setFilter(f);
+
+ kvs = new KeyValue [] {
+ // testRowOne-2
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[2], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[1], QUALIFIERS_ONE[3], VALUES[0]),
+ // testRowTwo-2
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1])
+ };
+ verifyScanFull(s, kvs);
+
+ }
+
+ public void testValueFilter() throws Exception {
+
+ // Match group one rows
+ long expectedRows = this.numRows / 2;
+ long expectedKeys = this.colsPerRow;
+ Filter f = new ValueFilter(CompareOp.EQUAL,
+ new BinaryComparator(Bytes.toBytes("testValueOne")));
+ Scan s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match group two rows
+ expectedRows = this.numRows / 2;
+ expectedKeys = this.colsPerRow;
+ f = new ValueFilter(CompareOp.EQUAL,
+ new BinaryComparator(Bytes.toBytes("testValueTwo")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match all values using regex
+ expectedRows = this.numRows;
+ expectedKeys = this.colsPerRow;
+ f = new ValueFilter(CompareOp.EQUAL,
+ new RegexStringComparator("testValue((One)|(Two))"));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match values less than
+ // Expect group one rows
+ expectedRows = this.numRows / 2;
+ expectedKeys = this.colsPerRow;
+ f = new ValueFilter(CompareOp.LESS,
+ new BinaryComparator(Bytes.toBytes("testValueTwo")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match values less than or equal
+ // Expect all rows
+ expectedRows = this.numRows;
+ expectedKeys = this.colsPerRow;
+ f = new ValueFilter(CompareOp.LESS_OR_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testValueTwo")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match values less than or equal
+ // Expect group one rows
+ expectedRows = this.numRows / 2;
+ expectedKeys = this.colsPerRow;
+ f = new ValueFilter(CompareOp.LESS_OR_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testValueOne")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match values not equal
+ // Expect half the rows
+ expectedRows = this.numRows / 2;
+ expectedKeys = this.colsPerRow;
+ f = new ValueFilter(CompareOp.NOT_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testValueOne")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match values greater or equal
+ // Expect all rows
+ expectedRows = this.numRows;
+ expectedKeys = this.colsPerRow;
+ f = new ValueFilter(CompareOp.GREATER_OR_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testValueOne")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match values greater
+ // Expect half the rows
+ expectedRows = this.numRows / 2;
+ expectedKeys = this.colsPerRow;
+ f = new ValueFilter(CompareOp.GREATER,
+ new BinaryComparator(Bytes.toBytes("testValueOne")));
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, expectedRows, expectedKeys);
+
+ // Match values not equal to testValueOne
+ // Look across rows and fully validate the keys and ordering
+ // Should see all keys in all group two rows
+ f = new ValueFilter(CompareOp.NOT_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testValueOne")));
+ s = new Scan();
+ s.setFilter(f);
+
+ KeyValue [] kvs = {
+ // testRowTwo-0
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-2
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-3
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ };
+ verifyScanFull(s, kvs);
+ }
+
+ public void testSkipFilter() throws Exception {
+
+ // Skip any row containing the qualifier "testQualifierOne-2"
+ // Should only get rows from the second group, with all of their keys
+ Filter f = new SkipFilter(new QualifierFilter(CompareOp.NOT_EQUAL,
+ new BinaryComparator(Bytes.toBytes("testQualifierOne-2"))));
+ Scan s = new Scan();
+ s.setFilter(f);
+
+ KeyValue [] kvs = {
+ // testRowTwo-0
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-2
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ // testRowTwo-3
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[3], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[2], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[1], QUALIFIERS_TWO[3], VALUES[1]),
+ };
+ verifyScanFull(s, kvs);
+ }
+
+ public void testFilterList() throws Exception {
+
+ // Test getting a single row, single key using Row, Qualifier, and Value
+ // regular expression and substring filters
+ // Use must pass all
+ List filters = new ArrayList();
+ filters.add(new RowFilter(CompareOp.EQUAL, new RegexStringComparator(".+-2")));
+ filters.add(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator(".+-2")));
+ filters.add(new ValueFilter(CompareOp.EQUAL, new SubstringComparator("One")));
+ Filter f = new FilterList(Operator.MUST_PASS_ALL, filters);
+ Scan s = new Scan();
+ s.addFamily(FAMILIES[0]);
+ s.setFilter(f);
+ KeyValue [] kvs = {
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[0])
+ };
+ verifyScanFull(s, kvs);
+
+ // Test getting everything with a MUST_PASS_ONE filter including row, qf, val
+ // regular expression and substring filters
+ filters.clear();
+ filters.add(new RowFilter(CompareOp.EQUAL, new RegexStringComparator(".+Two.+")));
+ filters.add(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator(".+-2")));
+ filters.add(new ValueFilter(CompareOp.EQUAL, new SubstringComparator("One")));
+ f = new FilterList(Operator.MUST_PASS_ONE, filters);
+ s = new Scan();
+ s.setFilter(f);
+ verifyScanNoEarlyOut(s, this.numRows, this.colsPerRow);
+ }
+
+ public void testFirstKeyOnlyFilter() throws Exception {
+ Scan s = new Scan();
+ s.setFilter(new FirstKeyOnlyFilter());
+ // Expected KVs, the first KV from each of the remaining 6 rows
+ KeyValue [] kvs = {
+ new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+ new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+ new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1])
+ };
+ verifyScanFull(s, kvs);
+ }
+
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestSchemaResource.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestSchemaResource.java
new file mode 100644
index 0000000..64c2d14
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestSchemaResource.java
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.stargate.client.Client;
+import org.apache.hadoop.hbase.stargate.client.Cluster;
+import org.apache.hadoop.hbase.stargate.client.Response;
+import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
+import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
+import org.apache.hadoop.hbase.stargate.model.TestTableSchemaModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class TestSchemaResource extends MiniClusterTestBase {
+ private Client client;
+ private JAXBContext context;
+ private HBaseAdmin admin;
+
+ private static String TABLE1 = "TestSchemaResource1";
+ private static String TABLE2 = "TestSchemaResource2";
+
+ public TestSchemaResource() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(
+ ColumnSchemaModel.class,
+ TableSchemaModel.class);
+ }
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ admin = new HBaseAdmin(conf);
+ client = new Client(new Cluster().add("localhost", testServletPort));
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ client.shutdown();
+ super.tearDown();
+ }
+
+ private byte[] toXML(TableSchemaModel model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return Bytes.toBytes(writer.toString());
+ }
+
+ private TableSchemaModel fromXML(byte[] content) throws JAXBException {
+ return (TableSchemaModel) context.createUnmarshaller()
+ .unmarshal(new ByteArrayInputStream(content));
+ }
+
+ public void testTableCreateAndDeleteXML()
+ throws IOException, JAXBException {
+ String schemaPath = "/" + TABLE1 + "/schema";
+ TableSchemaModel model;
+ Response response;
+
+ assertFalse(admin.tableExists(TABLE1));
+
+ // create the table
+ model = TestTableSchemaModel.buildTestModel(TABLE1);
+ TestTableSchemaModel.checkModel(model, TABLE1);
+ response = client.put(schemaPath, MIMETYPE_XML, toXML(model));
+ assertEquals(response.getCode(), 201);
+
+ // make sure HBase concurs, and wait for the table to come online
+ admin.enableTable(TABLE1);
+
+ // retrieve the schema and validate it
+ response = client.get(schemaPath, MIMETYPE_XML);
+ assertEquals(response.getCode(), 200);
+ model = fromXML(response.getBody());
+ TestTableSchemaModel.checkModel(model, TABLE1);
+
+ // delete the table
+ client.delete(schemaPath);
+
+ // make sure HBase concurs
+ assertFalse(admin.tableExists(TABLE1));
+ }
+
+ public void testTableCreateAndDeletePB() throws IOException, JAXBException {
+ String schemaPath = "/" + TABLE2 + "/schema";
+ TableSchemaModel model;
+ Response response;
+
+ assertFalse(admin.tableExists(TABLE2));
+
+ // create the table
+ model = TestTableSchemaModel.buildTestModel(TABLE2);
+ TestTableSchemaModel.checkModel(model, TABLE2);
+ response = client.put(schemaPath, Constants.MIMETYPE_PROTOBUF,
+ model.createProtobufOutput());
+ assertEquals(response.getCode(), 201);
+
+ // make sure HBase concurs, and wait for the table to come online
+ admin.enableTable(TABLE2);
+
+ // retrieve the schema and validate it
+ response = client.get(schemaPath, Constants.MIMETYPE_PROTOBUF);
+ assertEquals(response.getCode(), 200);
+ model = new TableSchemaModel();
+ model.getObjectFromMessage(response.getBody());
+ TestTableSchemaModel.checkModel(model, TABLE2);
+
+ // delete the table
+ client.delete(schemaPath);
+
+ // make sure HBase concurs
+ assertFalse(admin.tableExists(TABLE2));
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestStatusResource.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestStatusResource.java
new file mode 100644
index 0000000..aa588c6
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestStatusResource.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.stargate.client.Client;
+import org.apache.hadoop.hbase.stargate.client.Cluster;
+import org.apache.hadoop.hbase.stargate.client.Response;
+import org.apache.hadoop.hbase.stargate.model.StorageClusterStatusModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class TestStatusResource extends MiniClusterTestBase {
+ private static final byte[] ROOT_REGION_NAME = Bytes.toBytes("-ROOT-,,0");
+ private static final byte[] META_REGION_NAME = Bytes.toBytes(".META.,,1");
+
+ private Client client;
+ private JAXBContext context;
+
+ public TestStatusResource() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(
+ StorageClusterStatusModel.class);
+ }
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ client = new Client(new Cluster().add("localhost", testServletPort));
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ client.shutdown();
+ super.tearDown();
+ }
+
+ private void validate(StorageClusterStatusModel model) {
+ assertNotNull(model);
+ assertTrue(model.getRegions() >= 2);
+ assertTrue(model.getRequests() >= 0);
+ // assumes minicluster with two regionservers
+ assertTrue(model.getAverageLoad() >= 1.0);
+ assertNotNull(model.getLiveNodes());
+ assertNotNull(model.getDeadNodes());
+ assertFalse(model.getLiveNodes().isEmpty());
+ boolean foundRoot = false, foundMeta = false;
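+ // check each live node's region list for the -ROOT- and .META. regions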
+ for (StorageClusterStatusModel.Node node: model.getLiveNodes()) {
+ assertNotNull(node.getName());
+ assertTrue(node.getStartCode() > 0L);
+ assertTrue(node.getRequests() >= 0);
+ assertFalse(node.getRegions().isEmpty());
+ for (StorageClusterStatusModel.Node.Region region: node.getRegions()) {
+ if (Bytes.equals(region.getName(), ROOT_REGION_NAME)) {
+ foundRoot = true;
+ } else if (Bytes.equals(region.getName(), META_REGION_NAME)) {
+ foundMeta = true;
+ }
+ }
+ }
+ assertTrue(foundRoot);
+ assertTrue(foundMeta);
+ }
+
+ public void testGetClusterStatusXML() throws IOException, JAXBException {
+ Response response = client.get("/status/cluster", MIMETYPE_XML);
+ assertEquals(response.getCode(), 200);
+ StorageClusterStatusModel model = (StorageClusterStatusModel)
+ context.createUnmarshaller().unmarshal(
+ new ByteArrayInputStream(response.getBody()));
+ validate(model);
+ }
+
+ public void testGetClusterStatusPB() throws IOException {
+ Response response = client.get("/status/cluster", MIMETYPE_PROTOBUF);
+ assertEquals(response.getCode(), 200);
+ StorageClusterStatusModel model = new StorageClusterStatusModel();
+ model.getObjectFromMessage(response.getBody());
+ validate(model);
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestTableResource.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestTableResource.java
new file mode 100644
index 0000000..7a41ead
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestTableResource.java
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.stargate.client.Client;
+import org.apache.hadoop.hbase.stargate.client.Cluster;
+import org.apache.hadoop.hbase.stargate.client.Response;
+import org.apache.hadoop.hbase.stargate.model.TableModel;
+import org.apache.hadoop.hbase.stargate.model.TableInfoModel;
+import org.apache.hadoop.hbase.stargate.model.TableListModel;
+import org.apache.hadoop.hbase.stargate.model.TableRegionModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class TestTableResource extends MiniClusterTestBase {
+ private static String TABLE = "TestTableResource";
+ private static String COLUMN = "test:";
+
+ private Client client;
+ private JAXBContext context;
+ private HBaseAdmin admin;
+
+ public TestTableResource() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(
+ TableModel.class,
+ TableInfoModel.class,
+ TableListModel.class,
+ TableRegionModel.class);
+ }
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ client = new Client(new Cluster().add("localhost", testServletPort));
+ admin = new HBaseAdmin(conf);
+ if (admin.tableExists(TABLE)) {
+ return;
+ }
+ HTableDescriptor htd = new HTableDescriptor(TABLE);
+ htd.addFamily(new HColumnDescriptor(KeyValue.parseColumn(
+ Bytes.toBytes(COLUMN))[0]));
+ admin.createTable(htd);
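+ // open the new table to make sure it is available before the tests run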
+ new HTable(conf, TABLE);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ client.shutdown();
+ super.tearDown();
+ }
+
+ private void checkTableList(TableListModel model) {
+ boolean found = false;
+ Iterator tables = model.getTables().iterator();
+ assertTrue(tables.hasNext());
+ while (tables.hasNext()) {
+ TableModel table = tables.next();
+ if (table.getName().equals(TABLE)) {
+ found = true;
+ break;
+ }
+ }
+ assertTrue(found);
+ }
+
+ public void testTableListText() throws IOException {
+ Response response = client.get("/", MIMETYPE_PLAIN);
+ assertEquals(response.getCode(), 200);
+ }
+
+ public void testTableListXML() throws IOException, JAXBException {
+ Response response = client.get("/", MIMETYPE_XML);
+ assertEquals(response.getCode(), 200);
+ TableListModel model = (TableListModel)
+ context.createUnmarshaller()
+ .unmarshal(new ByteArrayInputStream(response.getBody()));
+ checkTableList(model);
+ }
+
+ public void testTableListJSON() throws IOException {
+ Response response = client.get("/", MIMETYPE_JSON);
+ assertEquals(response.getCode(), 200);
+ }
+
+ public void testTableListPB() throws IOException, JAXBException {
+ Response response = client.get("/", MIMETYPE_PROTOBUF);
+ assertEquals(response.getCode(), 200);
+ TableListModel model = new TableListModel();
+ model.getObjectFromMessage(response.getBody());
+ checkTableList(model);
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestVersionResource.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestVersionResource.java
new file mode 100644
index 0000000..d04a405
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/TestVersionResource.java
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.stargate.client.Client;
+import org.apache.hadoop.hbase.stargate.client.Cluster;
+import org.apache.hadoop.hbase.stargate.client.Response;
+import org.apache.hadoop.hbase.stargate.model.StorageClusterVersionModel;
+import org.apache.hadoop.hbase.stargate.model.VersionModel;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import com.sun.jersey.spi.container.servlet.ServletContainer;
+
+public class TestVersionResource extends MiniClusterTestBase {
+ private static final Log LOG =
+ LogFactory.getLog(TestVersionResource.class);
+
+ private Client client;
+ private JAXBContext context;
+
+ public TestVersionResource() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(
+ VersionModel.class,
+ StorageClusterVersionModel.class);
+ }
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ client = new Client(new Cluster().add("localhost", testServletPort));
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ client.shutdown();
+ super.tearDown();
+ }
+
+ private static void validate(VersionModel model) {
+ assertNotNull(model);
+ assertNotNull(model.getStargateVersion());
+ assertEquals(model.getStargateVersion(), RESTServlet.VERSION_STRING);
+ String osVersion = model.getOsVersion();
+ assertNotNull(osVersion);
+ assertTrue(osVersion.contains(System.getProperty("os.name")));
+ assertTrue(osVersion.contains(System.getProperty("os.version")));
+ assertTrue(osVersion.contains(System.getProperty("os.arch")));
+ String jvmVersion = model.getJvmVersion();
+ assertNotNull(jvmVersion);
+ assertTrue(jvmVersion.contains(System.getProperty("java.vm.vendor")));
+ assertTrue(jvmVersion.contains(System.getProperty("java.version")));
+ assertTrue(jvmVersion.contains(System.getProperty("java.vm.version")));
+ assertNotNull(model.getServerVersion());
+ String jerseyVersion = model.getJerseyVersion();
+ assertNotNull(jerseyVersion);
+ assertEquals(jerseyVersion, ServletContainer.class.getPackage()
+ .getImplementationVersion());
+ }
+
+ public void testGetStargateVersionText() throws IOException {
+ Response response = client.get("/version", MIMETYPE_PLAIN);
+ assertTrue(response.getCode() == 200);
+ String body = Bytes.toString(response.getBody());
+ assertTrue(body.length() > 0);
+ assertTrue(body.contains(RESTServlet.VERSION_STRING));
+ assertTrue(body.contains(System.getProperty("java.vm.vendor")));
+ assertTrue(body.contains(System.getProperty("java.version")));
+ assertTrue(body.contains(System.getProperty("java.vm.version")));
+ assertTrue(body.contains(System.getProperty("os.name")));
+ assertTrue(body.contains(System.getProperty("os.version")));
+ assertTrue(body.contains(System.getProperty("os.arch")));
+ assertTrue(body.contains(ServletContainer.class.getPackage()
+ .getImplementationVersion()));
+ }
+
+ public void testGetStargateVersionXML() throws IOException, JAXBException {
+ Response response = client.get("/version", MIMETYPE_XML);
+ assertTrue(response.getCode() == 200);
+ VersionModel model = (VersionModel)
+ context.createUnmarshaller().unmarshal(
+ new ByteArrayInputStream(response.getBody()));
+ validate(model);
+ LOG.info("success retrieving Stargate version as XML");
+ }
+
+ public void testGetStargateVersionJSON() throws IOException {
+ Response response = client.get("/version", MIMETYPE_JSON);
+ assertTrue(response.getCode() == 200);
+ }
+
+ public void testGetStargateVersionPB() throws IOException {
+ Response response = client.get("/version", MIMETYPE_PROTOBUF);
+ assertTrue(response.getCode() == 200);
+ VersionModel model = new VersionModel();
+ model.getObjectFromMessage(response.getBody());
+ validate(model);
+ LOG.info("success retrieving Stargate version as protobuf");
+ }
+
+ public void testGetStorageClusterVersionText() throws IOException {
+ Response response = client.get("/version/cluster", MIMETYPE_PLAIN);
+ assertTrue(response.getCode() == 200);
+ }
+
+ public void testGetStorageClusterVersionXML() throws IOException,
+ JAXBException {
+ Response response = client.get("/version/cluster", MIMETYPE_XML);
+ assertTrue(response.getCode() == 200);
+ StorageClusterVersionModel clusterVersionModel =
+ (StorageClusterVersionModel)
+ context.createUnmarshaller().unmarshal(
+ new ByteArrayInputStream(response.getBody()));
+ assertNotNull(clusterVersionModel);
+ assertNotNull(clusterVersionModel.getVersion());
+ LOG.info("success retrieving storage cluster version as XML");
+ }
+
+ public void testGetStorageClusterVersionJSON() throws IOException {
+ Response response = client.get("/version/cluster", MIMETYPE_JSON);
+ assertTrue(response.getCode() == 200);
+ }
+
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/auth/TestHBCAuthenticator.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/auth/TestHBCAuthenticator.java
new file mode 100644
index 0000000..bc2e915
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/auth/TestHBCAuthenticator.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.auth;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.stargate.User;
+
+import junit.framework.TestCase;
+
+public class TestHBCAuthenticator extends TestCase {
+
+ static final String UNKNOWN_TOKEN = "00000000000000000000000000000000";
+ static final String ADMIN_TOKEN = "e998efffc67c49c6e14921229a51b7b3";
+ static final String ADMIN_USERNAME = "testAdmin";
+ static final String USER_TOKEN = "da4829144e3a2febd909a6e1b4ed7cfa";
+ static final String USER_USERNAME = "testUser";
+ static final String DISABLED_TOKEN = "17de5b5db0fd3de0847bd95396f36d92";
+ static final String DISABLED_USERNAME = "disabledUser";
+
+ static Configuration conf;
+ static HBCAuthenticator authenticator;
+ static {
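+ // configure an admin, a plain, and a disabled user directly in the HBase configuration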
+ conf = HBaseConfiguration.create();
+ conf.set("stargate.auth.token." + USER_TOKEN, USER_USERNAME);
+ conf.set("stargate.auth.user." + USER_USERNAME + ".admin", "false");
+ conf.set("stargate.auth.user." + USER_USERNAME + ".disabled", "false");
+ conf.set("stargate.auth.token." + ADMIN_TOKEN, ADMIN_USERNAME);
+ conf.set("stargate.auth.user." + ADMIN_USERNAME + ".admin", "true");
+ conf.set("stargate.auth.user." + ADMIN_USERNAME + ".disabled", "false");
+ conf.set("stargate.auth.token." + DISABLED_TOKEN, DISABLED_USERNAME);
+ conf.set("stargate.auth.user." + DISABLED_USERNAME + ".admin", "false");
+ conf.set("stargate.auth.user." + DISABLED_USERNAME + ".disabled", "true");
+ authenticator = new HBCAuthenticator(conf);
+ }
+
+ public void testGetUserUnknown() throws Exception {
+ User user = authenticator.getUserForToken(UNKNOWN_TOKEN);
+ assertNull(user);
+ }
+
+ public void testGetAdminUser() throws Exception {
+ User user = authenticator.getUserForToken(ADMIN_TOKEN);
+ assertNotNull(user);
+ assertEquals(user.getName(), ADMIN_USERNAME);
+ assertTrue(user.isAdmin());
+ assertFalse(user.isDisabled());
+ }
+
+ public void testGetPlainUser() throws Exception {
+ User user = authenticator.getUserForToken(USER_TOKEN);
+ assertNotNull(user);
+ assertEquals(user.getName(), USER_USERNAME);
+ assertFalse(user.isAdmin());
+ assertFalse(user.isDisabled());
+ }
+
+ public void testGetDisabledUser() throws Exception {
+ User user = authenticator.getUserForToken(DISABLED_TOKEN);
+ assertNotNull(user);
+ assertEquals(user.getName(), DISABLED_USERNAME);
+ assertFalse(user.isAdmin());
+ assertTrue(user.isDisabled());
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/auth/TestHTableAuthenticator.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/auth/TestHTableAuthenticator.java
new file mode 100644
index 0000000..d9251c8
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/auth/TestHTableAuthenticator.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.auth;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.stargate.MiniClusterTestBase;
+import org.apache.hadoop.hbase.stargate.User;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class TestHTableAuthenticator extends MiniClusterTestBase {
+
+ static final String UNKNOWN_TOKEN = "00000000000000000000000000000000";
+ static final String ADMIN_TOKEN = "e998efffc67c49c6e14921229a51b7b3";
+ static final String ADMIN_USERNAME = "testAdmin";
+ static final String USER_TOKEN = "da4829144e3a2febd909a6e1b4ed7cfa";
+ static final String USER_USERNAME = "testUser";
+ static final String DISABLED_TOKEN = "17de5b5db0fd3de0847bd95396f36d92";
+ static final String DISABLED_USERNAME = "disabledUser";
+
+ static final String TABLE = "TestHTableAuthenticator";
+ static final byte[] USER = Bytes.toBytes("user");
+ static final byte[] NAME = Bytes.toBytes("name");
+ static final byte[] ADMIN = Bytes.toBytes("admin");
+ static final byte[] DISABLED = Bytes.toBytes("disabled");
+
+ HTableAuthenticator authenticator;
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ HBaseAdmin admin = new HBaseAdmin(conf);
+ if (!admin.tableExists(TABLE)) {
+ HTableDescriptor htd = new HTableDescriptor(TABLE);
+ htd.addFamily(new HColumnDescriptor(USER));
+ admin.createTable(htd);
+ HTable table = new HTable(conf, TABLE);
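+ // seed the authentication table with one row per test token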
+ Put put = new Put(Bytes.toBytes(ADMIN_TOKEN));
+ put.add(USER, NAME, Bytes.toBytes(ADMIN_USERNAME));
+ put.add(USER, ADMIN, Bytes.toBytes(true));
+ table.put(put);
+ put = new Put(Bytes.toBytes(USER_TOKEN));
+ put.add(USER, NAME, Bytes.toBytes(USER_USERNAME));
+ put.add(USER, ADMIN, Bytes.toBytes(false));
+ table.put(put);
+ put = new Put(Bytes.toBytes(DISABLED_TOKEN));
+ put.add(USER, NAME, Bytes.toBytes(DISABLED_USERNAME));
+ put.add(USER, DISABLED, Bytes.toBytes(true));
+ table.put(put);
+ table.flushCommits();
+ }
+ authenticator = new HTableAuthenticator(conf, TABLE);
+ }
+
+ public void testGetUserUnknown() throws Exception {
+ User user = authenticator.getUserForToken(UNKNOWN_TOKEN);
+ assertNull(user);
+ }
+
+ public void testGetAdminUser() throws Exception {
+ User user = authenticator.getUserForToken(ADMIN_TOKEN);
+ assertNotNull(user);
+ assertEquals(user.getName(), ADMIN_USERNAME);
+ assertTrue(user.isAdmin());
+ assertFalse(user.isDisabled());
+ }
+
+ public void testGetPlainUser() throws Exception {
+ User user = authenticator.getUserForToken(USER_TOKEN);
+ assertNotNull(user);
+ assertEquals(user.getName(), USER_USERNAME);
+ assertFalse(user.isAdmin());
+ assertFalse(user.isDisabled());
+ }
+
+ public void testGetDisabledUser() throws Exception {
+ User user = authenticator.getUserForToken(DISABLED_TOKEN);
+ assertNotNull(user);
+ assertEquals(user.getName(), DISABLED_USERNAME);
+ assertFalse(user.isAdmin());
+ assertTrue(user.isDisabled());
+ }
+
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/auth/TestJDBCAuthenticator.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/auth/TestJDBCAuthenticator.java
new file mode 100644
index 0000000..218a3bd
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/auth/TestJDBCAuthenticator.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.auth;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.hbase.stargate.User;
+import org.apache.hadoop.util.StringUtils;
+
+import junit.framework.TestCase;
+
+public class TestJDBCAuthenticator extends TestCase {
+
+ static final Log LOG = LogFactory.getLog(TestJDBCAuthenticator.class);
+
+ static final String TABLE = "users";
+ static final String JDBC_URL = "jdbc:hsqldb:mem:test";
+
+ static final String UNKNOWN_TOKEN = "00000000000000000000000000000000";
+ static final String ADMIN_TOKEN = "e998efffc67c49c6e14921229a51b7b3";
+ static final String ADMIN_USERNAME = "testAdmin";
+ static final String USER_TOKEN = "da4829144e3a2febd909a6e1b4ed7cfa";
+ static final String USER_USERNAME = "testUser";
+ static final String DISABLED_TOKEN = "17de5b5db0fd3de0847bd95396f36d92";
+ static final String DISABLED_USERNAME = "disabledUser";
+
+ static JDBCAuthenticator authenticator;
+ static {
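+ // set up an in-memory HSQLDB database and seed the users table with the test tokens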
+ try {
+ Class.forName("org.hsqldb.jdbcDriver");
+ Connection c = DriverManager.getConnection(JDBC_URL, "SA", "");
+ c.createStatement().execute(
+ "CREATE TABLE " + TABLE + " ( " +
+ "token CHAR(32) PRIMARY KEY, " +
+ "name VARCHAR(32), " +
+ "admin BOOLEAN, " +
+ "disabled BOOLEAN " +
+ ")");
+ c.createStatement().execute(
+ "INSERT INTO " + TABLE + " ( token,name,admin,disabled ) " +
+ "VALUES ( '" + ADMIN_TOKEN + "','" + ADMIN_USERNAME +
+ "',TRUE,FALSE )");
+ c.createStatement().execute(
+ "INSERT INTO " + TABLE + " ( token,name,admin,disabled ) " +
+ "VALUES ( '" + USER_TOKEN + "','" + USER_USERNAME +
+ "',FALSE,FALSE )");
+ c.createStatement().execute(
+ "INSERT INTO " + TABLE + " ( token,name,admin,disabled ) " +
+ "VALUES ( '" + DISABLED_TOKEN + "','" + DISABLED_USERNAME +
+ "',FALSE,TRUE )");
+ c.createStatement().execute("CREATE USER test PASSWORD access");
+ c.createStatement().execute("GRANT ALL ON " + TABLE + " TO test");
+ c.close();
+ authenticator = new JDBCAuthenticator(JDBC_URL, TABLE, "test",
+ "access");
+ } catch (Exception e) {
+ LOG.warn(StringUtils.stringifyException(e));
+ }
+ }
+
+ public void testGetUserUnknown() throws Exception {
+ User user = authenticator.getUserForToken(UNKNOWN_TOKEN);
+ assertNull(user);
+ }
+
+ public void testGetAdminUser() throws Exception {
+ User user = authenticator.getUserForToken(ADMIN_TOKEN);
+ assertNotNull(user);
+ assertEquals(user.getName(), ADMIN_USERNAME);
+ assertTrue(user.isAdmin());
+ assertFalse(user.isDisabled());
+ }
+
+ public void testGetPlainUser() throws Exception {
+ User user = authenticator.getUserForToken(USER_TOKEN);
+ assertNotNull(user);
+ assertEquals(user.getName(), USER_USERNAME);
+ assertFalse(user.isAdmin());
+ assertFalse(user.isDisabled());
+ }
+
+ public void testGetDisabledUser() throws Exception {
+ User user = authenticator.getUserForToken(DISABLED_TOKEN);
+ assertNotNull(user);
+ assertEquals(user.getName(), DISABLED_USERNAME);
+ assertFalse(user.isAdmin());
+ assertTrue(user.isDisabled());
+ }
+
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/auth/TestZooKeeperAuthenticator.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/auth/TestZooKeeperAuthenticator.java
new file mode 100644
index 0000000..46b4f56
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/auth/TestZooKeeperAuthenticator.java
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.auth;
+
+import org.apache.hadoop.hbase.stargate.MiniClusterTestBase;
+import org.apache.hadoop.hbase.stargate.User;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.ZooDefs.Ids;
+
+import org.json.JSONStringer;
+
+public class TestZooKeeperAuthenticator extends MiniClusterTestBase {
+
+ static final String UNKNOWN_TOKEN = "00000000000000000000000000000000";
+ static final String ADMIN_TOKEN = "e998efffc67c49c6e14921229a51b7b3";
+ static final String ADMIN_USERNAME = "testAdmin";
+ static final String USER_TOKEN = "da4829144e3a2febd909a6e1b4ed7cfa";
+ static final String USER_USERNAME = "testUser";
+ static final String DISABLED_TOKEN = "17de5b5db0fd3de0847bd95396f36d92";
+ static final String DISABLED_USERNAME = "disabledUser";
+
+ ZooKeeperAuthenticator authenticator;
+
+ @Override
+ public void setUp() throws Exception {
+ authenticator = new ZooKeeperAuthenticator(conf);
+ ZooKeeper zk = authenticator.wrapper.getZooKeeper();
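+ // Each user record is a JSON blob stored under the users znode, keyed
+ // by access token; create the znodes only if a previous run has not
+ // already left them in place.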
+ if (zk.exists(ZooKeeperAuthenticator.USERS_ZNODE_ROOT + "/" +
+ ADMIN_TOKEN, null) == null) {
+ zk.create(ZooKeeperAuthenticator.USERS_ZNODE_ROOT + "/" + ADMIN_TOKEN,
+ Bytes.toBytes(new JSONStringer()
+ .object()
+ .key("name").value(ADMIN_USERNAME)
+ .key("admin").value(true)
+ .endObject().toString()),
+ Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+ }
+ if (zk.exists(ZooKeeperAuthenticator.USERS_ZNODE_ROOT + "/" +
+ USER_TOKEN, null) == null) {
+ zk.create(ZooKeeperAuthenticator.USERS_ZNODE_ROOT + "/" + USER_TOKEN,
+ Bytes.toBytes(new JSONStringer()
+ .object()
+ .key("name").value(USER_USERNAME)
+ .key("disabled").value(false)
+ .endObject().toString()),
+ Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+ }
+ if (zk.exists(ZooKeeperAuthenticator.USERS_ZNODE_ROOT + "/" +
+ DISABLED_TOKEN, null) == null) {
+ zk.create(ZooKeeperAuthenticator.USERS_ZNODE_ROOT + "/" +DISABLED_TOKEN,
+ Bytes.toBytes(new JSONStringer()
+ .object()
+ .key("name").value(DISABLED_USERNAME)
+ .key("admin").value(false)
+ .key("disabled").value(true)
+ .endObject().toString()),
+ Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+ }
+ }
+
+ public void testGetUserUnknown() throws Exception {
+ User user = authenticator.getUserForToken(UNKNOWN_TOKEN);
+ assertNull(user);
+ }
+
+ public void testGetAdminUser() throws Exception {
+ User user = authenticator.getUserForToken(ADMIN_TOKEN);
+ assertNotNull(user);
+ assertEquals(user.getName(), ADMIN_USERNAME);
+ assertTrue(user.isAdmin());
+ assertFalse(user.isDisabled());
+ }
+
+ public void testGetPlainUser() throws Exception {
+ User user = authenticator.getUserForToken(USER_TOKEN);
+ assertNotNull(user);
+ assertEquals(user.getName(), USER_USERNAME);
+ assertFalse(user.isAdmin());
+ assertFalse(user.isDisabled());
+ }
+
+ public void testGetDisabledUser() throws Exception {
+ User user = authenticator.getUserForToken(DISABLED_TOKEN);
+ assertNotNull(user);
+ assertEquals(user.getName(), DISABLED_USERNAME);
+ assertFalse(user.isAdmin());
+ assertTrue(user.isDisabled());
+ }
+
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestCellModel.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestCellModel.java
new file mode 100644
index 0000000..fb512ae
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestCellModel.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+
+public class TestCellModel extends TestCase {
+
+ private static final long TIMESTAMP = 1245219839331L;
+ private static final byte[] COLUMN = Bytes.toBytes("testcolumn");
+ private static final byte[] VALUE = Bytes.toBytes("testvalue");
+
+ private static final String AS_XML =
+ "<Cell column=\"dGVzdGNvbHVtbg==\" timestamp=\"1245219839331\">" +
+ "dGVzdHZhbHVl</Cell>";
+
+ private static final String AS_PB =
+ "Egp0ZXN0Y29sdW1uGOO6i+eeJCIJdGVzdHZhbHVl";
+
+ private JAXBContext context;
+
+ public TestCellModel() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(CellModel.class);
+ }
+
+ private CellModel buildTestModel() {
+ CellModel model = new CellModel();
+ model.setColumn(COLUMN);
+ model.setTimestamp(TIMESTAMP);
+ model.setValue(VALUE);
+ return model;
+ }
+
+ @SuppressWarnings("unused")
+ private String toXML(CellModel model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return writer.toString();
+ }
+
+ private CellModel fromXML(String xml) throws JAXBException {
+ return (CellModel)
+ context.createUnmarshaller().unmarshal(new StringReader(xml));
+ }
+
+ @SuppressWarnings("unused")
+ private byte[] toPB(CellModel model) {
+ return model.createProtobufOutput();
+ }
+
+ private CellModel fromPB(String pb) throws IOException {
+ return (CellModel)
+ new CellModel().getObjectFromMessage(Base64.decode(pb));
+ }
+
+ private void checkModel(CellModel model) {
+ assertTrue(Bytes.equals(model.getColumn(), COLUMN));
+ assertTrue(Bytes.equals(model.getValue(), VALUE));
+ assertTrue(model.hasUserTimestamp());
+ assertEquals(model.getTimestamp(), TIMESTAMP);
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+
+ public void testFromPB() throws Exception {
+ checkModel(fromPB(AS_PB));
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestCellSetModel.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestCellSetModel.java
new file mode 100644
index 0000000..5b166b6
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestCellSetModel.java
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+
+public class TestCellSetModel extends TestCase {
+
+ private static final byte[] ROW1 = Bytes.toBytes("testrow1");
+ private static final byte[] COLUMN1 = Bytes.toBytes("testcolumn1");
+ private static final byte[] VALUE1 = Bytes.toBytes("testvalue1");
+ private static final long TIMESTAMP1 = 1245219839331L;
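+ // Note: ROW2 reuses the same key bytes as ROW1, so the serialized
+ // fixtures below contain two Row entries with an identical key.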
+ private static final byte[] ROW2 = Bytes.toBytes("testrow1");
+ private static final byte[] COLUMN2 = Bytes.toBytes("testcolumn2");
+ private static final byte[] VALUE2 = Bytes.toBytes("testvalue2");
+ private static final long TIMESTAMP2 = 1245239813319L;
+ private static final byte[] COLUMN3 = Bytes.toBytes("testcolumn3");
+ private static final byte[] VALUE3 = Bytes.toBytes("testvalue3");
+ private static final long TIMESTAMP3 = 1245393318192L;
+
+ private static final String AS_XML =
+ "<CellSet>" +
+ "<Row key=\"dGVzdHJvdzE=\">" +
+ "<Cell column=\"dGVzdGNvbHVtbjE=\" timestamp=\"1245219839331\">" +
+ "dGVzdHZhbHVlMQ==</Cell>" +
+ "</Row>" +
+ "<Row key=\"dGVzdHJvdzE=\">" +
+ "<Cell column=\"dGVzdGNvbHVtbjI=\" timestamp=\"1245239813319\">" +
+ "dGVzdHZhbHVlMg==</Cell>" +
+ "<Cell column=\"dGVzdGNvbHVtbjM=\" timestamp=\"1245393318192\">" +
+ "dGVzdHZhbHVlMw==</Cell>" +
+ "</Row>" +
+ "</CellSet>";
+
+ private static final String AS_PB =
+ "CiwKCHRlc3Ryb3cxEiASC3Rlc3Rjb2x1bW4xGOO6i+eeJCIKdGVzdHZhbHVlMQpOCgh0ZXN0cm93" +
+ "MRIgEgt0ZXN0Y29sdW1uMhjHyc7wniQiCnRlc3R2YWx1ZTISIBILdGVzdGNvbHVtbjMYsOLnuZ8k" +
+ "Igp0ZXN0dmFsdWUz";
+
+ private JAXBContext context;
+
+ public TestCellSetModel() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(
+ CellModel.class,
+ CellSetModel.class,
+ RowModel.class);
+ }
+
+ private CellSetModel buildTestModel() {
+ CellSetModel model = new CellSetModel();
+ RowModel row;
+ row = new RowModel();
+ row.setKey(ROW1);
+ row.addCell(new CellModel(COLUMN1, TIMESTAMP1, VALUE1));
+ model.addRow(row);
+ row = new RowModel();
+ row.setKey(ROW2);
+ row.addCell(new CellModel(COLUMN2, TIMESTAMP2, VALUE2));
+ row.addCell(new CellModel(COLUMN3, TIMESTAMP3, VALUE3));
+ model.addRow(row);
+ return model;
+ }
+
+ @SuppressWarnings("unused")
+ private String toXML(CellSetModel model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return writer.toString();
+ }
+
+ private CellSetModel fromXML(String xml) throws JAXBException {
+ return (CellSetModel)
+ context.createUnmarshaller().unmarshal(new StringReader(xml));
+ }
+
+ @SuppressWarnings("unused")
+ private byte[] toPB(CellSetModel model) {
+ return model.createProtobufOutput();
+ }
+
+ private CellSetModel fromPB(String pb) throws IOException {
+ return (CellSetModel)
+ new CellSetModel().getObjectFromMessage(Base64.decode(pb));
+ }
+
+ private void checkModel(CellSetModel model) {
+ Iterator<RowModel> rows = model.getRows().iterator();
+ RowModel row = rows.next();
+ assertTrue(Bytes.equals(ROW1, row.getKey()));
+ Iterator<CellModel> cells = row.getCells().iterator();
+ CellModel cell = cells.next();
+ assertTrue(Bytes.equals(COLUMN1, cell.getColumn()));
+ assertTrue(Bytes.equals(VALUE1, cell.getValue()));
+ assertTrue(cell.hasUserTimestamp());
+ assertEquals(cell.getTimestamp(), TIMESTAMP1);
+ assertFalse(cells.hasNext());
+ row = rows.next();
+ assertTrue(Bytes.equals(ROW2, row.getKey()));
+ cells = row.getCells().iterator();
+ cell = cells.next();
+ assertTrue(Bytes.equals(COLUMN2, cell.getColumn()));
+ assertTrue(Bytes.equals(VALUE2, cell.getValue()));
+ assertTrue(cell.hasUserTimestamp());
+ assertEquals(cell.getTimestamp(), TIMESTAMP2);
+ cell = cells.next();
+ assertTrue(Bytes.equals(COLUMN3, cell.getColumn()));
+ assertTrue(Bytes.equals(VALUE3, cell.getValue()));
+ assertTrue(cell.hasUserTimestamp());
+ assertEquals(cell.getTimestamp(), TIMESTAMP3);
+ assertFalse(cells.hasNext());
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+
+ public void testFromPB() throws Exception {
+ checkModel(fromPB(AS_PB));
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestColumnSchemaModel.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestColumnSchemaModel.java
new file mode 100644
index 0000000..94afd08
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestColumnSchemaModel.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.StringReader;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import junit.framework.TestCase;
+
+public class TestColumnSchemaModel extends TestCase {
+
+ protected static final String COLUMN_NAME = "testcolumn";
+ protected static final boolean BLOCKCACHE = true;
+ protected static final int BLOCKSIZE = 16384;
+ protected static final boolean BLOOMFILTER = false;
+ protected static final String COMPRESSION = "GZ";
+ protected static final boolean IN_MEMORY = false;
+ protected static final int TTL = 86400;
+ protected static final int VERSIONS = 1;
+
+ protected static final String AS_XML =
+ "<ColumnSchema name=\"testcolumn\" BLOCKSIZE=\"16384\" BLOOMFILTER=\"false\"" +
+ " BLOCKCACHE=\"true\" COMPRESSION=\"GZ\" VERSIONS=\"1\" TTL=\"86400\"" +
+ " IN_MEMORY=\"false\"/>";
+
+ private JAXBContext context;
+
+ public TestColumnSchemaModel() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(ColumnSchemaModel.class);
+ }
+
+ protected static ColumnSchemaModel buildTestModel() {
+ ColumnSchemaModel model = new ColumnSchemaModel();
+ model.setName(COLUMN_NAME);
+ model.__setBlockcache(BLOCKCACHE);
+ model.__setBlocksize(BLOCKSIZE);
+ model.__setBloomfilter(BLOOMFILTER);
+ model.__setCompression(COMPRESSION);
+ model.__setInMemory(IN_MEMORY);
+ model.__setTTL(TTL);
+ model.__setVersions(VERSIONS);
+ return model;
+ }
+
+ @SuppressWarnings("unused")
+ private String toXML(ColumnSchemaModel model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return writer.toString();
+ }
+
+ private ColumnSchemaModel fromXML(String xml) throws JAXBException {
+ return (ColumnSchemaModel)
+ context.createUnmarshaller().unmarshal(new StringReader(xml));
+ }
+
+ protected static void checkModel(ColumnSchemaModel model) {
+ assertEquals(model.getName(), COLUMN_NAME);
+ assertEquals(model.__getBlockcache(), BLOCKCACHE);
+ assertEquals(model.__getBlocksize(), BLOCKSIZE);
+ assertEquals(model.__getBloomfilter(), BLOOMFILTER);
+ assertTrue(model.__getCompression().equalsIgnoreCase(COMPRESSION));
+ assertEquals(model.__getInMemory(), IN_MEMORY);
+ assertEquals(model.__getTTL(), TTL);
+ assertEquals(model.__getVersions(), VERSIONS);
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestRowModel.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestRowModel.java
new file mode 100644
index 0000000..21c0284
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestRowModel.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+
+public class TestRowModel extends TestCase {
+
+ private static final byte[] ROW1 = Bytes.toBytes("testrow1");
+ private static final byte[] COLUMN1 = Bytes.toBytes("testcolumn1");
+ private static final byte[] VALUE1 = Bytes.toBytes("testvalue1");
+ private static final long TIMESTAMP1 = 1245219839331L;
+
+ private static final String AS_XML =
+ "<Row key=\"dGVzdHJvdzE=\">" +
+ "<Cell column=\"dGVzdGNvbHVtbjE=\" timestamp=\"1245219839331\">" +
+ "dGVzdHZhbHVlMQ==</Cell>" +
+ "</Row>";
+
+ private JAXBContext context;
+
+ public TestRowModel() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(
+ CellModel.class,
+ RowModel.class);
+ }
+
+ private RowModel buildTestModel() {
+ RowModel model = new RowModel();
+ model.setKey(ROW1);
+ model.addCell(new CellModel(COLUMN1, TIMESTAMP1, VALUE1));
+ return model;
+ }
+
+ @SuppressWarnings("unused")
+ private String toXML(RowModel model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return writer.toString();
+ }
+
+ private RowModel fromXML(String xml) throws JAXBException {
+ return (RowModel)
+ context.createUnmarshaller().unmarshal(new StringReader(xml));
+ }
+
+ private void checkModel(RowModel model) {
+ assertTrue(Bytes.equals(ROW1, model.getKey()));
+ Iterator<CellModel> cells = model.getCells().iterator();
+ CellModel cell = cells.next();
+ assertTrue(Bytes.equals(COLUMN1, cell.getColumn()));
+ assertTrue(Bytes.equals(VALUE1, cell.getValue()));
+ assertTrue(cell.hasUserTimestamp());
+ assertEquals(cell.getTimestamp(), TIMESTAMP1);
+ assertFalse(cells.hasNext());
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestScannerModel.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestScannerModel.java
new file mode 100644
index 0000000..1feea55
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestScannerModel.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+
+public class TestScannerModel extends TestCase {
+ private static final byte[] START_ROW = Bytes.toBytes("abracadabra");
+ private static final byte[] END_ROW = Bytes.toBytes("zzyzx");
+ private static final byte[] COLUMN1 = Bytes.toBytes("column1");
+ private static final byte[] COLUMN2 = Bytes.toBytes("column2:foo");
+ private static final long START_TIME = 1245219839331L;
+ private static final long END_TIME = 1245393318192L;
+ private static final int BATCH = 100;
+
+ private static final String AS_XML =
+ "<Scanner startRow=\"YWJyYWNhZGFicmE=\" endRow=\"enp5eng=\"" +
+ " startTime=\"1245219839331\" endTime=\"1245393318192\" batch=\"100\">" +
+ "<column>Y29sdW1uMQ==</column>" +
+ "<column>Y29sdW1uMjpmb28=</column>" +
+ "</Scanner>";
+
+ private static final String AS_PB =
+ "CgthYnJhY2FkYWJyYRIFenp5engaB2NvbHVtbjEaC2NvbHVtbjI6Zm9vIGQo47qL554kMLDi57mf" +
+ "JA==";
+
+ private JAXBContext context;
+
+ public TestScannerModel() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(ScannerModel.class);
+ }
+
+ private ScannerModel buildTestModel() {
+ ScannerModel model = new ScannerModel();
+ model.setStartRow(START_ROW);
+ model.setEndRow(END_ROW);
+ model.addColumn(COLUMN1);
+ model.addColumn(COLUMN2);
+ model.setStartTime(START_TIME);
+ model.setEndTime(END_TIME);
+ model.setBatch(BATCH);
+ return model;
+ }
+
+ @SuppressWarnings("unused")
+ private String toXML(ScannerModel model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return writer.toString();
+ }
+
+ private ScannerModel fromXML(String xml) throws JAXBException {
+ return (ScannerModel)
+ context.createUnmarshaller().unmarshal(new StringReader(xml));
+ }
+
+ @SuppressWarnings("unused")
+ private byte[] toPB(ScannerModel model) {
+ return model.createProtobufOutput();
+ }
+
+ private ScannerModel fromPB(String pb) throws IOException {
+ return (ScannerModel)
+ new ScannerModel().getObjectFromMessage(Base64.decode(pb));
+ }
+
+ private void checkModel(ScannerModel model) {
+ assertTrue(Bytes.equals(model.getStartRow(), START_ROW));
+ assertTrue(Bytes.equals(model.getEndRow(), END_ROW));
+ boolean foundCol1 = false, foundCol2 = false;
+ for (byte[] column: model.getColumns()) {
+ if (Bytes.equals(column, COLUMN1)) {
+ foundCol1 = true;
+ } else if (Bytes.equals(column, COLUMN2)) {
+ foundCol2 = true;
+ }
+ }
+ assertTrue(foundCol1);
+ assertTrue(foundCol2);
+ assertEquals(model.getStartTime(), START_TIME);
+ assertEquals(model.getEndTime(), END_TIME);
+ assertEquals(model.getBatch(), BATCH);
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+
+ public void testFromPB() throws Exception {
+ checkModel(fromPB(AS_PB));
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestStorageClusterStatusModel.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestStorageClusterStatusModel.java
new file mode 100644
index 0000000..c855b4e
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestStorageClusterStatusModel.java
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+
+public class TestStorageClusterStatusModel extends TestCase {
+
+ private static final String AS_XML =
+ "<ClusterStatus regions=\"2\" requests=\"0\" averageLoad=\"1.0\">" +
+ "<DeadNodes/>" +
+ "<LiveNodes>" +
+ "<Node name=\"test1\" startCode=\"1245219839331\"" +
+ " heapSizeMB=\"128\" maxHeapSizeMB=\"1024\">" +
+ "<Region name=\"LVJPT1QtLCww\" stores=\"1\" storefiles=\"1\"" +
+ " storefileSizeMB=\"0\" memstoreSizeMB=\"0\" storefileIndexSizeMB=\"0\"/>" +
+ "</Node>" +
+ "<Node name=\"test2\" startCode=\"1245239331198\"" +
+ " heapSizeMB=\"512\" maxHeapSizeMB=\"1024\">" +
+ "<Region name=\"Lk1FVEEuLCwxMjQ2MDAwMDQzNzI0\" stores=\"1\" storefiles=\"1\"" +
+ " storefileSizeMB=\"0\" memstoreSizeMB=\"0\" storefileIndexSizeMB=\"0\"/>" +
+ "</Node>" +
+ "</LiveNodes>" +
+ "</ClusterStatus>";
+
+ private static final String AS_PB =
+"Ci0KBXRlc3QxEOO6i+eeJBgAIIABKIAIMhUKCS1ST09ULSwsMBABGAEgACgAMAAKOQoFdGVzdDIQ"+
+"/pKx8J4kGAAggAQogAgyIQoVLk1FVEEuLCwxMjQ2MDAwMDQzNzI0EAEYASAAKAAwABgCIAApAAAA"+
+"AAAA8D8=";
+
+ private JAXBContext context;
+
+ public TestStorageClusterStatusModel() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(StorageClusterStatusModel.class);
+ }
+
+ private StorageClusterStatusModel buildTestModel() {
+ StorageClusterStatusModel model = new StorageClusterStatusModel();
+ model.setRegions(2);
+ model.setRequests(0);
+ model.setAverageLoad(1.0);
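+ // Two live nodes, each hosting a single catalog region; these values
+ // must stay in sync with the AS_XML and AS_PB fixtures above.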
+ model.addLiveNode("test1", 1245219839331L, 128, 1024)
+ .addRegion(Bytes.toBytes("-ROOT-,,0"), 1, 1, 0, 0, 0);
+ model.addLiveNode("test2", 1245239331198L, 512, 1024)
+ .addRegion(Bytes.toBytes(".META.,,1246000043724"),1, 1, 0, 0, 0);
+ return model;
+ }
+
+ @SuppressWarnings("unused")
+ private String toXML(StorageClusterStatusModel model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return writer.toString();
+ }
+
+ private StorageClusterStatusModel fromXML(String xml) throws JAXBException {
+ return (StorageClusterStatusModel)
+ context.createUnmarshaller().unmarshal(new StringReader(xml));
+ }
+
+ @SuppressWarnings("unused")
+ private byte[] toPB(StorageClusterStatusModel model) {
+ return model.createProtobufOutput();
+ }
+
+ private StorageClusterStatusModel fromPB(String pb) throws IOException {
+ return (StorageClusterStatusModel)
+ new StorageClusterStatusModel().getObjectFromMessage(Base64.decode(pb));
+ }
+
+ private void checkModel(StorageClusterStatusModel model) {
+ assertEquals(model.getRegions(), 2);
+ assertEquals(model.getRequests(), 0);
+ assertEquals(model.getAverageLoad(), 1.0);
+ Iterator<StorageClusterStatusModel.Node> nodes =
+ model.getLiveNodes().iterator();
+ StorageClusterStatusModel.Node node = nodes.next();
+ assertEquals(node.getName(), "test1");
+ assertEquals(node.getStartCode(), 1245219839331L);
+ assertEquals(node.getHeapSizeMB(), 128);
+ assertEquals(node.getMaxHeapSizeMB(), 1024);
+ Iterator<StorageClusterStatusModel.Node.Region> regions =
+ node.getRegions().iterator();
+ StorageClusterStatusModel.Node.Region region = regions.next();
+ assertTrue(Bytes.toString(region.getName()).equals("-ROOT-,,0"));
+ assertEquals(region.getStores(), 1);
+ assertEquals(region.getStorefiles(), 1);
+ assertEquals(region.getStorefileSizeMB(), 0);
+ assertEquals(region.getMemstoreSizeMB(), 0);
+ assertEquals(region.getStorefileIndexSizeMB(), 0);
+ assertFalse(regions.hasNext());
+ node = nodes.next();
+ assertEquals(node.getName(), "test2");
+ assertEquals(node.getStartCode(), 1245239331198L);
+ assertEquals(node.getHeapSizeMB(), 512);
+ assertEquals(node.getMaxHeapSizeMB(), 1024);
+ regions = node.getRegions().iterator();
+ region = regions.next();
+ assertEquals(Bytes.toString(region.getName()), ".META.,,1246000043724");
+ assertEquals(region.getStores(), 1);
+ assertEquals(region.getStorefiles(), 1);
+ assertEquals(region.getStorefileSizeMB(), 0);
+ assertEquals(region.getMemstoreSizeMB(), 0);
+ assertEquals(region.getStorefileIndexSizeMB(), 0);
+ assertFalse(regions.hasNext());
+ assertFalse(nodes.hasNext());
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+
+ public void testFromPB() throws Exception {
+ checkModel(fromPB(AS_PB));
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestStorageClusterVersionModel.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestStorageClusterVersionModel.java
new file mode 100644
index 0000000..f7460da
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestStorageClusterVersionModel.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.StringReader;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import junit.framework.TestCase;
+
+public class TestStorageClusterVersionModel extends TestCase {
+ private static final String VERSION = "0.0.1-testing";
+
+ private static final String AS_XML =
+ "<ClusterVersion>" + VERSION + "</ClusterVersion>";
+
+ private JAXBContext context;
+
+ public TestStorageClusterVersionModel() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(StorageClusterVersionModel.class);
+ }
+
+ private StorageClusterVersionModel buildTestModel() {
+ StorageClusterVersionModel model = new StorageClusterVersionModel();
+ model.setVersion(VERSION);
+ return model;
+ }
+
+ @SuppressWarnings("unused")
+ private String toXML(StorageClusterVersionModel model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return writer.toString();
+ }
+
+ private StorageClusterVersionModel fromXML(String xml) throws JAXBException {
+ return (StorageClusterVersionModel)
+ context.createUnmarshaller().unmarshal(new StringReader(xml));
+ }
+
+ private void checkModel(StorageClusterVersionModel model) {
+ assertEquals(model.getVersion(), VERSION);
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestTableInfoModel.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestTableInfoModel.java
new file mode 100644
index 0000000..a6962a9
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestTableInfoModel.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.util.Base64;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+
+public class TestTableInfoModel extends TestCase {
+ private static final String TABLE = "testtable";
+ private static final byte[] START_KEY = Bytes.toBytes("abracadbra");
+ private static final byte[] END_KEY = Bytes.toBytes("zzyzx");
+ private static final long ID = 8731042424L;
+ private static final String LOCATION = "testhost:9876";
+
+ private static final String AS_XML =
+ "<TableInfo name=\"testtable\">" +
+ "<Region name=\"testtable,abracadbra,8731042424\" id=\"8731042424\"" +
+ " startKey=\"YWJyYWNhZGJyYQ==\" endKey=\"enp5eng=\" location=\"testhost:9876\"/>" +
+ "</TableInfo>";
+
+ private static final String AS_PB =
+ "Cgl0ZXN0dGFibGUSSQofdGVzdHRhYmxlLGFicmFjYWRicmEsODczMTA0MjQyNBIKYWJyYWNhZGJy" +
+ "YRoFenp5engg+MSkwyAqDXRlc3Rob3N0Ojk4NzY=";
+
+ private JAXBContext context;
+
+ public TestTableInfoModel() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(
+ TableInfoModel.class,
+ TableRegionModel.class);
+ }
+
+ private TableInfoModel buildTestModel() {
+ TableInfoModel model = new TableInfoModel();
+ model.setName(TABLE);
+ model.add(new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION));
+ return model;
+ }
+
+ @SuppressWarnings("unused")
+ private String toXML(TableInfoModel model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return writer.toString();
+ }
+
+ private TableInfoModel fromXML(String xml) throws JAXBException {
+ return (TableInfoModel)
+ context.createUnmarshaller().unmarshal(new StringReader(xml));
+ }
+
+ @SuppressWarnings("unused")
+ private byte[] toPB(TableInfoModel model) {
+ return model.createProtobufOutput();
+ }
+
+ private TableInfoModel fromPB(String pb) throws IOException {
+ return (TableInfoModel)
+ new TableInfoModel().getObjectFromMessage(Base64.decode(pb));
+ }
+
+ private void checkModel(TableInfoModel model) {
+ assertEquals(model.getName(), TABLE);
+ Iterator<TableRegionModel> regions = model.getRegions().iterator();
+ TableRegionModel region = regions.next();
+ assertTrue(Bytes.equals(region.getStartKey(), START_KEY));
+ assertTrue(Bytes.equals(region.getEndKey(), END_KEY));
+ assertEquals(region.getId(), ID);
+ assertEquals(region.getLocation(), LOCATION);
+ assertFalse(regions.hasNext());
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+
+ public void testFromPB() throws Exception {
+ checkModel(fromPB(AS_PB));
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestTableListModel.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestTableListModel.java
new file mode 100644
index 0000000..0ae33af
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestTableListModel.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.util.Base64;
+
+import junit.framework.TestCase;
+
+public class TestTableListModel extends TestCase {
+ private static final String TABLE1 = "table1";
+ private static final String TABLE2 = "table2";
+ private static final String TABLE3 = "table3";
+
+ private static final String AS_XML =
+ "<TableList><table name=\"table1\"/><table name=\"table2\"/>" +
+ "<table name=\"table3\"/></TableList>";
+
+ private static final String AS_PB = "CgZ0YWJsZTEKBnRhYmxlMgoGdGFibGUz";
+
+ private JAXBContext context;
+
+ public TestTableListModel() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(
+ TableListModel.class,
+ TableModel.class);
+ }
+
+ private TableListModel buildTestModel() {
+ TableListModel model = new TableListModel();
+ model.add(new TableModel(TABLE1));
+ model.add(new TableModel(TABLE2));
+ model.add(new TableModel(TABLE3));
+ return model;
+ }
+
+ @SuppressWarnings("unused")
+ private String toXML(TableListModel model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return writer.toString();
+ }
+
+ private TableListModel fromXML(String xml) throws JAXBException {
+ return (TableListModel)
+ context.createUnmarshaller().unmarshal(new StringReader(xml));
+ }
+
+ @SuppressWarnings("unused")
+ private byte[] toPB(TableListModel model) {
+ return model.createProtobufOutput();
+ }
+
+ private TableListModel fromPB(String pb) throws IOException {
+ return (TableListModel)
+ new TableListModel().getObjectFromMessage(Base64.decode(pb));
+ }
+
+ private void checkModel(TableListModel model) {
+ Iterator<TableModel> tables = model.getTables().iterator();
+ TableModel table = tables.next();
+ assertEquals(table.getName(), TABLE1);
+ table = tables.next();
+ assertEquals(table.getName(), TABLE2);
+ table = tables.next();
+ assertEquals(table.getName(), TABLE3);
+ assertFalse(tables.hasNext());
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+
+ public void testFromPB() throws Exception {
+ checkModel(fromPB(AS_PB));
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestTableRegionModel.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestTableRegionModel.java
new file mode 100644
index 0000000..6950341
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestTableRegionModel.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.StringReader;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+
+public class TestTableRegionModel extends TestCase {
+ private static final String TABLE = "testtable";
+ private static final byte[] START_KEY = Bytes.toBytes("abracadbra");
+ private static final byte[] END_KEY = Bytes.toBytes("zzyzx");
+ private static final long ID = 8731042424L;
+ private static final String LOCATION = "testhost:9876";
+
+ private static final String AS_XML =
+ "<Region name=\"testtable,abracadbra,8731042424\" id=\"8731042424\"" +
+ " startKey=\"YWJyYWNhZGJyYQ==\" endKey=\"enp5eng=\" location=\"testhost:9876\"/>";
+
+ private JAXBContext context;
+
+ public TestTableRegionModel() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(TableRegionModel.class);
+ }
+
+ private TableRegionModel buildTestModel() {
+ TableRegionModel model =
+ new TableRegionModel(TABLE, ID, START_KEY, END_KEY, LOCATION);
+ return model;
+ }
+
+ @SuppressWarnings("unused")
+ private String toXML(TableRegionModel model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return writer.toString();
+ }
+
+ private TableRegionModel fromXML(String xml) throws JAXBException {
+ return (TableRegionModel)
+ context.createUnmarshaller().unmarshal(new StringReader(xml));
+ }
+
+ private void checkModel(TableRegionModel model) {
+ assertTrue(Bytes.equals(model.getStartKey(), START_KEY));
+ assertTrue(Bytes.equals(model.getEndKey(), END_KEY));
+ assertEquals(model.getId(), ID);
+ assertEquals(model.getLocation(), LOCATION);
+ assertEquals(model.getName(),
+ TABLE + "," + Bytes.toString(START_KEY) + "," + Long.toString(ID));
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestTableSchemaModel.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestTableSchemaModel.java
new file mode 100644
index 0000000..4d3c11e
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestTableSchemaModel.java
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.Iterator;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.util.Base64;
+
+import junit.framework.TestCase;
+
+public class TestTableSchemaModel extends TestCase {
+
+ public static final String TABLE_NAME = "testTable";
+ private static final boolean IS_META = false;
+ private static final boolean IS_ROOT = false;
+ private static final boolean READONLY = false;
+
+ private static final String AS_XML =
+ "<TableSchema name=\"testTable\" IS_META=\"false\" IS_ROOT=\"false\"" +
+ " READONLY=\"false\">" +
+ TestColumnSchemaModel.AS_XML +
+ "</TableSchema>";
+
+ private static final String AS_PB =
+ "Cgl0ZXN0VGFibGUSEAoHSVNfTUVUQRIFZmFsc2USEAoHSVNfUk9PVBIFZmFsc2USEQoIUkVBRE9O" +
+ "TFkSBWZhbHNlEhIKCUlOX01FTU9SWRIFZmFsc2UamAEKCnRlc3Rjb2x1bW4SEgoJQkxPQ0tTSVpF" +
+ "EgUxNjM4NBIUCgtCTE9PTUZJTFRFUhIFZmFsc2USEgoKQkxPQ0tDQUNIRRIEdHJ1ZRIRCgtDT01Q" +
+ "UkVTU0lPThICZ3oSDQoIVkVSU0lPTlMSATESDAoDVFRMEgU4NjQwMBISCglJTl9NRU1PUlkSBWZh" +
+ "bHNlGICjBSABKgJneiAAKAA=";
+
+ private JAXBContext context;
+
+ public TestTableSchemaModel() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(
+ ColumnSchemaModel.class,
+ TableSchemaModel.class);
+ }
+
+ public static TableSchemaModel buildTestModel() {
+ return buildTestModel(TABLE_NAME);
+ }
+
+ public static TableSchemaModel buildTestModel(String name) {
+ TableSchemaModel model = new TableSchemaModel();
+ model.setName(name);
+ model.__setIsMeta(IS_META);
+ model.__setIsRoot(IS_ROOT);
+ model.__setReadOnly(READONLY);
+ model.addColumnFamily(TestColumnSchemaModel.buildTestModel());
+ return model;
+ }
+
+ @SuppressWarnings("unused")
+ private String toXML(TableSchemaModel model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return writer.toString();
+ }
+
+ private TableSchemaModel fromXML(String xml) throws JAXBException {
+ return (TableSchemaModel)
+ context.createUnmarshaller().unmarshal(new StringReader(xml));
+ }
+
+ @SuppressWarnings("unused")
+ private byte[] toPB(TableSchemaModel model) {
+ return model.createProtobufOutput();
+ }
+
+ private TableSchemaModel fromPB(String pb) throws IOException {
+ return (TableSchemaModel)
+ new TableSchemaModel().getObjectFromMessage(Base64.decode(pb));
+ }
+
+ public static void checkModel(TableSchemaModel model) {
+ checkModel(model, TABLE_NAME);
+ }
+
+ public static void checkModel(TableSchemaModel model, String tableName) {
+ assertEquals(model.getName(), tableName);
+ assertEquals(model.__getIsMeta(), IS_META);
+ assertEquals(model.__getIsRoot(), IS_ROOT);
+ assertEquals(model.__getReadOnly(), READONLY);
+ Iterator<ColumnSchemaModel> families = model.getColumns().iterator();
+ assertTrue(families.hasNext());
+ ColumnSchemaModel family = families.next();
+ TestColumnSchemaModel.checkModel(family);
+ assertFalse(families.hasNext());
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+
+ public void testFromPB() throws Exception {
+ checkModel(fromPB(AS_PB));
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestVersionModel.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestVersionModel.java
new file mode 100644
index 0000000..54ae189
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/model/TestVersionModel.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.model;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+
+import org.apache.hadoop.hbase.util.Base64;
+
+import junit.framework.TestCase;
+
+public class TestVersionModel extends TestCase {
+ private static final String STARGATE_VERSION = "0.0.1";
+ private static final String OS_VERSION =
+ "Linux 2.6.18-128.1.6.el5.centos.plusxen amd64";
+ private static final String JVM_VERSION =
+ "Sun Microsystems Inc. 1.6.0_13-11.3-b02";
+ private static final String JETTY_VERSION = "6.1.14";
+ private static final String JERSEY_VERSION = "1.1.0-ea";
+
+ private static final String AS_XML =
+ "<Version Stargate=\"0.0.1\" JVM=\"Sun Microsystems Inc. 1.6.0_13-11.3-b02\"" +
+ " OS=\"Linux 2.6.18-128.1.6.el5.centos.plusxen amd64\" Server=\"6.1.14\"" +
+ " Jersey=\"1.1.0-ea\"/>";
+
+ private static final String AS_PB =
+ "CgUwLjAuMRInU3VuIE1pY3Jvc3lzdGVtcyBJbmMuIDEuNi4wXzEzLTExLjMtYjAyGi1MaW51eCAy" +
+ "LjYuMTgtMTI4LjEuNi5lbDUuY2VudG9zLnBsdXN4ZW4gYW1kNjQiBjYuMS4xNCoIMS4xLjAtZWE=";
+
+ private JAXBContext context;
+
+ public TestVersionModel() throws JAXBException {
+ super();
+ context = JAXBContext.newInstance(VersionModel.class);
+ }
+
+ private VersionModel buildTestModel() {
+ VersionModel model = new VersionModel();
+ model.setStargateVersion(STARGATE_VERSION);
+ model.setOsVersion(OS_VERSION);
+ model.setJvmVersion(JVM_VERSION);
+ model.setServerVersion(JETTY_VERSION);
+ model.setJerseyVersion(JERSEY_VERSION);
+ return model;
+ }
+
+ @SuppressWarnings("unused")
+ private String toXML(VersionModel model) throws JAXBException {
+ StringWriter writer = new StringWriter();
+ context.createMarshaller().marshal(model, writer);
+ return writer.toString();
+ }
+
+ private VersionModel fromXML(String xml) throws JAXBException {
+ return (VersionModel)
+ context.createUnmarshaller().unmarshal(new StringReader(xml));
+ }
+
+ @SuppressWarnings("unused")
+ private byte[] toPB(VersionModel model) {
+ return model.createProtobufOutput();
+ }
+
+ private VersionModel fromPB(String pb) throws IOException {
+ return (VersionModel)
+ new VersionModel().getObjectFromMessage(Base64.decode(pb));
+ }
+
+ private void checkModel(VersionModel model) {
+ assertEquals(model.getStargateVersion(), STARGATE_VERSION);
+ assertEquals(model.getOsVersion(), OS_VERSION);
+ assertEquals(model.getJvmVersion(), JVM_VERSION);
+ assertEquals(model.getServerVersion(), JETTY_VERSION);
+ assertEquals(model.getJerseyVersion(), JERSEY_VERSION);
+ }
+
+ public void testBuildModel() throws Exception {
+ checkModel(buildTestModel());
+ }
+
+ public void testFromXML() throws Exception {
+ checkModel(fromXML(AS_XML));
+ }
+
+ public void testFromPB() throws Exception {
+ checkModel(fromPB(AS_PB));
+ }
+}
diff --git a/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/util/TestHTableTokenBucket.java b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/util/TestHTableTokenBucket.java
new file mode 100644
index 0000000..a7884fa
--- /dev/null
+++ b/contrib/stargate/core/src/test/java/org/apache/hadoop/hbase/stargate/util/TestHTableTokenBucket.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.stargate.util;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.stargate.MiniClusterTestBase;
+import org.apache.hadoop.hbase.stargate.util.HTableTokenBucket;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class TestHTableTokenBucket extends MiniClusterTestBase {
+
+ static final String TABLE = "users";
+ static final byte[] USER = Bytes.toBytes("user");
+ static final byte[] NAME = Bytes.toBytes("name");
+ static final byte[] TOKENS = Bytes.toBytes("tokens");
+ static final byte[] TOKENS_RATE = Bytes.toBytes("tokens.rate");
+ static final byte[] TOKENS_SIZE = Bytes.toBytes("tokens.size");
+ static final String USER_TOKEN = "da4829144e3a2febd909a6e1b4ed7cfa";
+ static final String USER_USERNAME = "testUser";
+ static final double RATE = 1; // per second
+ static final long SIZE = 10;
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+ HBaseAdmin admin = new HBaseAdmin(conf);
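+ // Create the users table on first use and seed a single row, keyed by
+ // the access token, carrying the bucket's refill rate and capacity.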
+ if (!admin.tableExists(TABLE)) {
+ HTableDescriptor htd = new HTableDescriptor(TABLE);
+ htd.addFamily(new HColumnDescriptor(USER));
+ admin.createTable(htd);
+ HTable table = new HTable(TABLE);
+ Put put = new Put(Bytes.toBytes(USER_TOKEN));
+ put.add(USER, NAME, Bytes.toBytes(USER_USERNAME));
+ put.add(USER, TOKENS_RATE, Bytes.toBytes(RATE));
+ put.add(USER, TOKENS_SIZE, Bytes.toBytes(SIZE));
+ table.put(put);
+ table.flushCommits();
+ }
+ }
+
+ public void testTokenBucketConfig() throws Exception {
+ HTableTokenBucket tb = new HTableTokenBucket(conf, TABLE,
+ Bytes.toBytes(USER_TOKEN));
+ assertEquals(tb.getRate(), RATE);
+ assertEquals(tb.getSize(), SIZE);
+ }
+
+ public void testTokenBucket() throws Exception {
+ HTableTokenBucket tb = new HTableTokenBucket(conf, TABLE,
+ Bytes.toBytes(USER_TOKEN));
+ int last = 0;
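+ // With a refill rate of 1 token/sec and 2-second polls, each pass
+ // should add one or two tokens until the bucket caps out at SIZE.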
+ for (int i = 0; i <= 5 && last < 10; i++) {
+ int avail = tb.available();
+ assertTrue("bucket did not increment", avail > last);
+ assertTrue("bucket updated too quickly", avail - last < 3);
+ last = avail;
+ Thread.sleep(2000);
+ }
+ assertTrue("bucket did not fill", last >= 10);
+ assertTrue("bucket overfilled", last == 10);
+ }
+
+}
diff --git a/contrib/stargate/pom.xml b/contrib/stargate/pom.xml
index 16e201e..383b2fc 100644
--- a/contrib/stargate/pom.xml
+++ b/contrib/stargate/pom.xml
@@ -4,7 +4,7 @@
 <modelVersion>4.0.0</modelVersion>
 <artifactId>hbase-contrib-stargate</artifactId>
- <packaging>war</packaging>
+ <packaging>pom</packaging>
 <name>HBase Contrib - Stargate</name>
@@ -12,75 +12,9 @@
 <artifactId>hbase-contrib</artifactId>
 <version>0.21.0-SNAPSHOT</version>
-
- <properties>
- <jsr311.version>1.1.1</jsr311.version>
- <protobuf.version>2.3.0</protobuf.version>
- <jersey.version>1.1.4.1</jersey.version>
- <json.version>20090211</json.version>
- <hsqldb.version>1.8.0.10</hsqldb.version>
- </properties>
-
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-war-plugin</artifactId>
- <configuration>
- <webXml>conf/web.xml</webXml>
- </configuration>
- </plugin>
- </plugins>
- </build>
+ <modules>
+ <module>core</module>
+ <module>war</module>
+ </modules>
-
- <dependencies>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>hbase-core</artifactId>
- </dependency>
- <dependency>
- <groupId>${project.groupId}</groupId>
- <artifactId>hbase-core</artifactId>
- <classifier>tests</classifier>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-core-test</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-hdfs-test</artifactId>
- </dependency>
- <dependency>
- <groupId>javax.ws.rs</groupId>
- <artifactId>jsr311-api</artifactId>
- <version>${jsr311.version}</version>
- </dependency>
- <dependency>
- <groupId>com.google.protobuf</groupId>
- <artifactId>protobuf-java</artifactId>
- <version>${protobuf.version}</version>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-json</artifactId>
- <version>${jersey.version}</version>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-server</artifactId>
- <version>${jersey.version}</version>
- </dependency>
- <dependency>
- <groupId>org.json</groupId>
- <artifactId>json</artifactId>
- <version>${json.version}</version>
- </dependency>
- <dependency>
- <groupId>hsqldb</groupId>
- <artifactId>hsqldb</artifactId>
- <version>${hsqldb.version}</version>
- <scope>test</scope>
- </dependency>
- </dependencies>
-
-</project>
+</project>
\ No newline at end of file
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/Constants.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/Constants.java
deleted file mode 100644
index 2e6767c..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/Constants.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-/**
- * Common constants for org.apache.hadoop.hbase.stargate
- */
-public interface Constants {
- public static final String VERSION_STRING = "0.0.2";
-
- public static final String AUTHENTICATOR_KEY = "stargate.authenticator";
- public static final String MULTIUSER_KEY = "stargate.multiuser";
- public static final String STATUS_REPORT_PERIOD_KEY =
- "stargate.status.period";
-
- public static final String USERS_TABLE = "users";
-
- public static final String INSTANCE_ZNODE_ROOT = "/stargate/instance";
- public static final String USERS_ZNODE_ROOT = "/stargate/users";
-
- public static final int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours
-
- public static final String MIMETYPE_TEXT = "text/plain";
- public static final String MIMETYPE_HTML = "text/html";
- public static final String MIMETYPE_XML = "text/xml";
- public static final String MIMETYPE_BINARY = "application/octet-stream";
- public static final String MIMETYPE_PROTOBUF = "application/x-protobuf";
- public static final String MIMETYPE_JSON = "application/json";
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/Main.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/Main.java
deleted file mode 100644
index c254ba4..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/Main.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright 2009 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.PosixParser;
-import org.mortbay.jetty.Connector;
-import org.mortbay.jetty.Server;
-import org.mortbay.jetty.servlet.Context;
-import org.mortbay.jetty.servlet.ServletHolder;
-
-import com.sun.jersey.spi.container.servlet.ServletContainer;
-
-/**
- * Main class for launching Stargate as a servlet hosted by an embedded Jetty
- * servlet container.
- *
- * The following options are supported:
- * <ul>
- * <li>-p --port : service port (default 8080)</li>
- * <li>-m --multiuser : enable multiuser mode</li>
- * </ul>
- */
-public class Main implements Constants {
-
- public static void main(String[] args) throws Exception {
- // process command line
-
- Options options = new Options();
- options.addOption("p", "port", true, "service port");
- options.addOption("m", "multiuser", false, "enable multiuser mode");
- CommandLineParser parser = new PosixParser();
- CommandLine cmd = parser.parse(options, args);
- int port = 8080;
- if (cmd.hasOption("p")) {
- port = Integer.valueOf(cmd.getOptionValue("p"));
- }
-
- // set up the Jersey servlet container for Jetty
-
- ServletHolder sh = new ServletHolder(ServletContainer.class);
- sh.setInitParameter(
- "com.sun.jersey.config.property.resourceConfigClass",
- ResourceConfig.class.getCanonicalName());
- sh.setInitParameter("com.sun.jersey.config.property.packages",
- "jetty");
-
- // set up Jetty and run the embedded server
-
- Server server = new Server(port);
- server.setSendServerVersion(false);
- server.setSendDateHeader(false);
- server.setStopAtShutdown(true);
- // set up context
- Context context = new Context(server, "/", Context.SESSIONS);
- context.addServlet(sh, "/*");
-
- // configure the Stargate singleton
-
- RESTServlet servlet = RESTServlet.getInstance();
- servlet.setMultiUser(cmd.hasOption("m"));
- for (Connector conn: server.getConnectors()) {
- servlet.addConnectorAddress(conn.getHost(), conn.getLocalPort());
- }
-
- server.start();
- server.join();
- }
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ProtobufMessageHandler.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ProtobufMessageHandler.java
deleted file mode 100644
index 611a728..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ProtobufMessageHandler.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-
-/**
- * Common interface for models capable of supporting protobuf marshalling
- * and unmarshalling. Hooks up to the ProtobufMessageBodyConsumer and
- * ProtobufMessageBodyProducer adapters.
- */
-public abstract interface ProtobufMessageHandler {
- /**
- * @return the protobuf represention of the model
- */
- public byte[] createProtobufOutput();
-
- /**
- * Initialize the model from a protobuf representation.
- * @param message the raw bytes of the protobuf message
- * @return reference to self for convenience
- * @throws IOException
- */
- public ProtobufMessageHandler getObjectFromMessage(byte[] message)
- throws IOException;
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RESTServlet.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RESTServlet.java
deleted file mode 100644
index f6a3039..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RESTServlet.java
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Chore;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.HTablePool;
-import org.apache.hadoop.hbase.stargate.auth.Authenticator;
-import org.apache.hadoop.hbase.stargate.auth.HBCAuthenticator;
-import org.apache.hadoop.hbase.stargate.auth.HTableAuthenticator;
-import org.apache.hadoop.hbase.stargate.auth.JDBCAuthenticator;
-import org.apache.hadoop.hbase.stargate.auth.ZooKeeperAuthenticator;
-import org.apache.hadoop.hbase.stargate.metrics.StargateMetrics;
-import org.apache.hadoop.hbase.stargate.util.HTableTokenBucket;
-import org.apache.hadoop.hbase.stargate.util.SoftUserData;
-import org.apache.hadoop.hbase.stargate.util.UserData;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
-
-import org.apache.hadoop.util.StringUtils;
-
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.Watcher.Event.EventType;
-import org.apache.zookeeper.Watcher.Event.KeeperState;
-import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.data.Stat;
-
-import org.json.JSONStringer;
-
-import com.sun.jersey.server.impl.container.servlet.ServletAdaptor;
-
-/**
- * Singleton class encapsulating global REST servlet state and functions.
- */
-public class RESTServlet extends ServletAdaptor
- implements Constants, Watcher {
-
- private static final Log LOG = LogFactory.getLog(RESTServlet.class);
- private static final long serialVersionUID = 1L;
-
- private static RESTServlet instance;
-
- class StatusReporter extends Chore {
-
- public StatusReporter(int period, AtomicBoolean stopping) {
- super(period, stopping);
- }
-
- @Override
- protected void chore() {
- if (wrapper != null) try {
- JSONStringer status = new JSONStringer();
- status.object();
- status.key("requests").value(metrics.getRequests());
- status.key("connectors").array();
- for (Pair<String, Integer> e: connectors) {
- status.object()
- .key("host").value(e.getFirst())
- .key("port").value(e.getSecond())
- .endObject();
- }
- status.endArray();
- status.endObject();
- updateNode(wrapper, znode, CreateMode.EPHEMERAL,
- Bytes.toBytes(status.toString()));
- } catch (Exception e) {
- LOG.error(StringUtils.stringifyException(e));
- }
- }
-
- }
-
- final String znode = INSTANCE_ZNODE_ROOT + "/" + System.currentTimeMillis();
- transient final Configuration conf;
- transient final HTablePool pool;
- transient volatile ZooKeeperWrapper wrapper;
- transient Chore statusReporter;
- transient Authenticator authenticator;
- AtomicBoolean stopping = new AtomicBoolean(false);
- boolean multiuser;
- Map<String, Integer> maxAgeMap =
- Collections.synchronizedMap(new HashMap<String, Integer>());
- List<Pair<String, Integer>> connectors =
- Collections.synchronizedList(new ArrayList<Pair<String, Integer>>());
- StargateMetrics metrics = new StargateMetrics();
-
- /**
- * @return the RESTServlet singleton instance
- * @throws IOException
- */
- public synchronized static RESTServlet getInstance() throws IOException {
- if (instance == null) {
- instance = new RESTServlet();
- }
- return instance;
- }
-
- static boolean ensureExists(final ZooKeeperWrapper zkw, final String znode,
- final CreateMode mode) throws IOException {
- ZooKeeper zk = zkw.getZooKeeper();
- try {
- Stat stat = zk.exists(znode, false);
- if (stat != null) {
- return true;
- }
- zk.create(znode, new byte[0], Ids.OPEN_ACL_UNSAFE, mode);
- LOG.debug("Created ZNode " + znode);
- return true;
- } catch (KeeperException.NodeExistsException e) {
- return true; // ok, move on.
- } catch (KeeperException.NoNodeException e) {
- return ensureParentExists(zkw, znode, mode) &&
- ensureExists(zkw, znode, mode);
- } catch (KeeperException e) {
- throw new IOException(e);
- } catch (InterruptedException e) {
- throw new IOException(e);
- }
- }
-
- static boolean ensureParentExists(final ZooKeeperWrapper zkw,
- final String znode, final CreateMode mode) throws IOException {
- int index = znode.lastIndexOf("/");
- if (index <= 0) { // Parent is root, which always exists.
- return true;
- }
- return ensureExists(zkw, znode.substring(0, index), mode);
- }
-
- static void updateNode(final ZooKeeperWrapper zkw, final String znode,
- final CreateMode mode, final byte[] data) throws IOException {
- ensureExists(zkw, znode, mode);
- ZooKeeper zk = zkw.getZooKeeper();
- try {
- zk.setData(znode, data, -1);
- } catch (KeeperException e) {
- throw new IOException(e);
- } catch (InterruptedException e) {
- throw new IOException(e);
- }
- }
-
- ZooKeeperWrapper initZooKeeperWrapper() throws IOException {
- return new ZooKeeperWrapper(conf, this);
- }
-
- /**
- * Constructor
- * @throws IOException
- */
- public RESTServlet() throws IOException {
- this.conf = HBaseConfiguration.create();
- this.pool = new HTablePool(conf, 10);
- this.wrapper = initZooKeeperWrapper();
- this.statusReporter = new StatusReporter(
- conf.getInt(STATUS_REPORT_PERIOD_KEY, 1000 * 60), stopping);
- this.multiuser = conf.getBoolean("stargate.multiuser", false);
- }
-
- @Override
- public void process(WatchedEvent event) {
- LOG.debug(("ZooKeeper.Watcher event " + event.getType() + " with path " +
- event.getPath()));
- // handle disconnection (or manual delete to test disconnection scenario)
- if (event.getState() == KeeperState.Expired ||
- (event.getType().equals(EventType.NodeDeleted) &&
- event.getPath().equals(znode))) {
- wrapper.close();
- wrapper = null;
- while (!stopping.get()) try {
- wrapper = initZooKeeperWrapper();
- break;
- } catch (IOException e) {
- LOG.error(StringUtils.stringifyException(e));
- try {
- Thread.sleep(10 * 1000);
- } catch (InterruptedException ex) {
- }
- }
- }
- }
-
- HTablePool getTablePool() {
- return pool;
- }
-
- ZooKeeperWrapper getZooKeeperWrapper() {
- return wrapper;
- }
-
- Configuration getConfiguration() {
- return conf;
- }
-
- StargateMetrics getMetrics() {
- return metrics;
- }
-
- void addConnectorAddress(String host, int port) {
- connectors.add(new Pair(host, port));
- }
-
- /**
- * @param tableName the table name
- * @return the maximum cache age suitable for use with this table, in
- * seconds
- * @throws IOException
- */
- public int getMaxAge(String tableName) throws IOException {
- Integer i = maxAgeMap.get(tableName);
- if (i != null) {
- return i.intValue();
- }
- HTableInterface table = pool.getTable(tableName);
- try {
- int maxAge = DEFAULT_MAX_AGE;
- for (HColumnDescriptor family :
- table.getTableDescriptor().getFamilies()) {
- int ttl = family.getTimeToLive();
- if (ttl < 0) {
- continue;
- }
- if (ttl < maxAge) {
- maxAge = ttl;
- }
- }
- maxAgeMap.put(tableName, maxAge);
- return maxAge;
- } finally {
- pool.putTable(table);
- }
- }
-
- /**
- * Signal that a previously calculated maximum cache age has been
- * invalidated by a schema change.
- * @param tableName the table name
- */
- public void invalidateMaxAge(String tableName) {
- maxAgeMap.remove(tableName);
- }
-
- /**
- * @return true if the servlet should operate in multiuser mode
- */
- public boolean isMultiUser() {
- return multiuser;
- }
-
- /**
- * @param flag true if the servlet should operate in multiuser mode
- */
- public void setMultiUser(boolean multiuser) {
- this.multiuser = multiuser;
- }
-
- /**
- * @return an authenticator
- */
- public Authenticator getAuthenticator() {
- if (authenticator == null) {
- String className = conf.get(AUTHENTICATOR_KEY,
- HBCAuthenticator.class.getCanonicalName());
- try {
- Class<?> c = getClass().getClassLoader().loadClass(className);
- if (className.endsWith(HBCAuthenticator.class.getName()) ||
- className.endsWith(HTableAuthenticator.class.getName()) ||
- className.endsWith(JDBCAuthenticator.class.getName())) {
- Constructor<?> cons = c.getConstructor(Configuration.class);
- authenticator = (Authenticator)
- cons.newInstance(new Object[] { conf });
- } else if (className.endsWith(ZooKeeperAuthenticator.class.getName())) {
- Constructor<?> cons = c.getConstructor(Configuration.class,
- ZooKeeperWrapper.class);
- authenticator = (Authenticator)
- cons.newInstance(new Object[] { conf, wrapper });
- } else {
- authenticator = (Authenticator)c.newInstance();
- }
- } catch (Exception e) {
- LOG.error(StringUtils.stringifyException(e));
- }
- if (authenticator == null) {
- authenticator = new HBCAuthenticator(conf);
- }
- }
- return authenticator;
- }
-
- /**
- * @param authenticator
- */
- public void setAuthenticator(Authenticator authenticator) {
- this.authenticator = authenticator;
- }
-
- /**
- * Check if the user has exceeded their request token limit within the
- * current interval
- * @param user the user
- * @param want the number of tokens desired
- * @throws IOException
- */
- public boolean userRequestLimit(final User user, int want)
- throws IOException {
- UserData ud = SoftUserData.get(user);
- HTableTokenBucket tb = (HTableTokenBucket) ud.get(UserData.TOKENBUCKET);
- if (tb == null) {
- tb = new HTableTokenBucket(conf, Bytes.toBytes(user.getToken()));
- ud.put(UserData.TOKENBUCKET, tb);
- }
- if (tb.available() < want) {
- return false;
- }
- tb.remove(want);
- return true;
- }
-
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RegionsResource.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RegionsResource.java
deleted file mode 100644
index 1c57aa5..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RegionsResource.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.Map;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.HTablePool;
-import org.apache.hadoop.hbase.stargate.User;
-import org.apache.hadoop.hbase.stargate.model.TableInfoModel;
-import org.apache.hadoop.hbase.stargate.model.TableRegionModel;
-
-public class RegionsResource implements Constants {
- private static final Log LOG = LogFactory.getLog(RegionsResource.class);
-
- User user;
- String table;
- CacheControl cacheControl;
- RESTServlet servlet;
-
- public RegionsResource(User user, String table) throws IOException {
- if (user != null) {
- if (!user.isAdmin()) {
- throw new WebApplicationException(Response.Status.FORBIDDEN);
- }
- this.user = user;
- }
- this.table = table;
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- servlet = RESTServlet.getInstance();
- }
-
- private Map<HRegionInfo, HServerAddress> getTableRegions()
- throws IOException {
- HTablePool pool = servlet.getTablePool();
- HTable table = (HTable) pool.getTable(this.table);
- try {
- return table.getRegionsInfo();
- } finally {
- pool.putTable(table);
- }
- }
-
- @GET
- @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
- public Response get(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- try {
- TableInfoModel model = new TableInfoModel(table);
- Map<HRegionInfo, HServerAddress> regions = getTableRegions();
- for (Map.Entry<HRegionInfo, HServerAddress> e: regions.entrySet()) {
- HRegionInfo hri = e.getKey();
- HServerAddress addr = e.getValue();
- InetSocketAddress sa = addr.getInetSocketAddress();
- model.add(
- new TableRegionModel(table, hri.getRegionId(), hri.getStartKey(),
- hri.getEndKey(),
- sa.getHostName() + ":" + Integer.valueOf(sa.getPort())));
- }
- ResponseBuilder response = Response.ok(model);
- response.cacheControl(cacheControl);
- return response.build();
- } catch (TableNotFoundException e) {
- throw new WebApplicationException(Response.Status.NOT_FOUND);
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- }
- }
-
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ResourceConfig.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ResourceConfig.java
deleted file mode 100644
index 37fedcb..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ResourceConfig.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import com.sun.jersey.api.core.PackagesResourceConfig;
-
-public class ResourceConfig extends PackagesResourceConfig {
- public ResourceConfig() {
- super("org.apache.hadoop.hbase.stargate");
- }
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ResultGenerator.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ResultGenerator.java
deleted file mode 100644
index 7df141d..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ResultGenerator.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.stargate.model.ScannerModel;
-
-import org.json.JSONObject;
-
-public abstract class ResultGenerator implements Iterator<KeyValue> {
-
- public static ResultGenerator fromRowSpec(final String table,
- final RowSpec rowspec, final Filter filter) throws IOException {
- if (rowspec.isSingleRow()) {
- return new RowResultGenerator(table, rowspec, filter);
- } else {
- return new ScannerResultGenerator(table, rowspec, filter);
- }
- }
-
- public static Filter buildFilter(final String filter) throws Exception {
- return ScannerModel.buildFilter(new JSONObject(filter));
- }
-
- public abstract void putBack(KeyValue kv);
-
- public abstract void close();
-
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RootResource.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RootResource.java
deleted file mode 100644
index 90dd6d5..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RootResource.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.stargate.User;
-import org.apache.hadoop.hbase.stargate.model.TableListModel;
-import org.apache.hadoop.hbase.stargate.model.TableModel;
-
-@Path("/")
-public class RootResource implements Constants {
- private static final Log LOG = LogFactory.getLog(RootResource.class);
-
- RESTServlet servlet;
- CacheControl cacheControl;
-
- public RootResource() throws IOException {
- servlet = RESTServlet.getInstance();
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- }
-
- private final User auth(final String token) throws IOException {
- User user = servlet.getAuthenticator().getUserForToken(token);
- if (user == null || user.isDisabled()) {
- throw new WebApplicationException(Response.Status.FORBIDDEN);
- }
- return user;
- }
-
- private final TableListModel getTableList() throws IOException {
- TableListModel tableList = new TableListModel();
- HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
- HTableDescriptor[] list = admin.listTables();
- for (HTableDescriptor htd: list) {
- tableList.add(new TableModel(htd.getNameAsString()));
- }
- return tableList;
- }
-
- private final TableListModel getTableListForUser(final User user)
- throws IOException {
- TableListModel tableList;
- if (user.isAdmin()) {
- tableList = getTableList();
- } else {
- tableList = new TableListModel();
- HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
- HTableDescriptor[] list = admin.listTables();
- String prefix = user.getName() + ".";
- for (HTableDescriptor htd: list) {
- String name = htd.getNameAsString();
- if (!name.startsWith(prefix)) {
- continue;
- }
- tableList.add(new TableModel(name.substring(prefix.length())));
- }
- }
- return tableList;
- }
-
- @GET
- @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
- public Response get(final @Context UriInfo uriInfo) throws IOException {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- if (servlet.isMultiUser()) {
- throw new WebApplicationException(Response.Status.BAD_REQUEST);
- }
- try {
- ResponseBuilder response = Response.ok(getTableList());
- response.cacheControl(cacheControl);
- return response.build();
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- }
- }
-
- @Path("status/cluster")
- public StorageClusterStatusResource getClusterStatusResource()
- throws IOException {
- if (servlet.isMultiUser()) {
- throw new WebApplicationException(Response.Status.BAD_REQUEST);
- }
- return new StorageClusterStatusResource();
- }
-
- @Path("version")
- public VersionResource getVersionResource() throws IOException {
- return new VersionResource();
- }
-
- @Path("{token: [0-9a-fA-F]{32} }") // 128 bit md5 sums
- public Response getTableRootResource(
- final @PathParam("token") String token) throws IOException {
- if (servlet.isMultiUser()) {
- User user = auth(token);
- if (!servlet.userRequestLimit(user, 1)) {
- throw new WebApplicationException(Response.status(509).build());
- }
- try {
- ResponseBuilder response = Response.ok(getTableListForUser(user));
- response.cacheControl(cacheControl);
- return response.build();
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- }
- }
- throw new WebApplicationException(Response.Status.BAD_REQUEST);
- }
-
- @Path("{token: [0-9a-fA-F]{32} }/status/cluster") // 128 bit md5 sums
- public StorageClusterStatusResource getClusterStatusResourceAuthorized(
- final @PathParam("token") String token) throws IOException {
- if (servlet.isMultiUser()) {
- User user = auth(token);
- if (user.isAdmin()) {
- if (!servlet.userRequestLimit(user, 1)) {
- throw new WebApplicationException(Response.status(509).build());
- }
- return new StorageClusterStatusResource();
- }
- throw new WebApplicationException(Response.Status.FORBIDDEN);
- }
- throw new WebApplicationException(Response.Status.BAD_REQUEST);
- }
-
- @Path("{token: [0-9a-fA-F]{32} }/{table}")
- public TableResource getTableResource(
- final @PathParam("token") String token,
- final @PathParam("table") String table) throws IOException {
- if (servlet.isMultiUser()) {
- User user = auth(token);
- if (!servlet.userRequestLimit(user, 1)) {
- throw new WebApplicationException(Response.status(509).build());
- }
- return new TableResource(user, table);
- }
- throw new WebApplicationException(Response.Status.BAD_REQUEST);
- }
-
- @Path("{table}")
- public TableResource getTableResource(
- final @PathParam("table") String table) throws IOException {
- if (servlet.isMultiUser()) {
- throw new WebApplicationException(Response.Status.BAD_REQUEST);
- }
- return new TableResource(null, table);
- }
-
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RowResource.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RowResource.java
deleted file mode 100644
index 7d5f136..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RowResource.java
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-import java.net.URLDecoder;
-import java.util.List;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.HTablePool;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.stargate.User;
-import org.apache.hadoop.hbase.stargate.model.CellModel;
-import org.apache.hadoop.hbase.stargate.model.CellSetModel;
-import org.apache.hadoop.hbase.stargate.model.RowModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-public class RowResource implements Constants {
- private static final Log LOG = LogFactory.getLog(RowResource.class);
-
- User user;
- String tableName;
- String actualTableName;
- RowSpec rowspec;
- CacheControl cacheControl;
- RESTServlet servlet;
-
- public RowResource(User user, String table, String rowspec, String versions)
- throws IOException {
- this.user = user;
- if (user != null) {
- this.actualTableName =
- !user.isAdmin() ? user.getName() + "." + table : table;
- } else {
- this.actualTableName = table;
- }
- this.tableName = table;
- this.rowspec = new RowSpec(URLDecoder.decode(rowspec,
- HConstants.UTF8_ENCODING));
- if (versions != null) {
- this.rowspec.setMaxVersions(Integer.valueOf(versions));
- }
- this.servlet = RESTServlet.getInstance();
- cacheControl = new CacheControl();
- cacheControl.setMaxAge(servlet.getMaxAge(table));
- cacheControl.setNoTransform(false);
- }
-
- @GET
- @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
- public Response get(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- try {
- ResultGenerator generator =
- ResultGenerator.fromRowSpec(actualTableName, rowspec, null);
- if (!generator.hasNext()) {
- throw new WebApplicationException(Response.Status.NOT_FOUND);
- }
- CellSetModel model = new CellSetModel();
- KeyValue value = generator.next();
- byte[] rowKey = value.getRow();
- RowModel rowModel = new RowModel(rowKey);
- do {
- if (!Bytes.equals(value.getRow(), rowKey)) {
- model.addRow(rowModel);
- rowKey = value.getRow();
- rowModel = new RowModel(rowKey);
- }
- rowModel.addCell(
- new CellModel(value.getFamily(), value.getQualifier(),
- value.getTimestamp(), value.getValue()));
- value = generator.next();
- } while (value != null);
- model.addRow(rowModel);
- ResponseBuilder response = Response.ok(model);
- response.cacheControl(cacheControl);
- return response.build();
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- }
- }
-
- @GET
- @Produces(MIMETYPE_BINARY)
- public Response getBinary(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
- }
- servlet.getMetrics().incrementRequests(1);
- // doesn't make sense to use a non specific coordinate as this can only
- // return a single cell
- if (!rowspec.hasColumns() || rowspec.getColumns().length > 1) {
- throw new WebApplicationException(Response.Status.BAD_REQUEST);
- }
- try {
- ResultGenerator generator =
- ResultGenerator.fromRowSpec(actualTableName, rowspec, null);
- if (!generator.hasNext()) {
- throw new WebApplicationException(Response.Status.NOT_FOUND);
- }
- KeyValue value = generator.next();
- ResponseBuilder response = Response.ok(value.getValue());
- response.cacheControl(cacheControl);
- response.header("X-Timestamp", value.getTimestamp());
- return response.build();
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- }
- }
-
- Response update(final CellSetModel model, final boolean replace) {
- servlet.getMetrics().incrementRequests(1);
- HTablePool pool = servlet.getTablePool();
- HTableInterface table = null;
- try {
- List<RowModel> rows = model.getRows();
- // the user request limit is a transaction limit, so we need to
- // account for updates by row
- if (user != null && !servlet.userRequestLimit(user, rows.size())) {
- throw new WebApplicationException(Response.status(509).build());
- }
- table = pool.getTable(actualTableName);
- for (RowModel row: rows) {
- byte[] key = row.getKey();
- Put put = new Put(key);
- for (CellModel cell: row.getCells()) {
- byte [][] parts = KeyValue.parseColumn(cell.getColumn());
- if (parts.length == 2 && parts[1].length > 0) {
- put.add(parts[0], parts[1], cell.getTimestamp(), cell.getValue());
- } else {
- put.add(parts[0], null, cell.getTimestamp(), cell.getValue());
- }
- }
- table.put(put);
- if (LOG.isDebugEnabled()) {
- LOG.debug("PUT " + put.toString());
- }
- }
- table.flushCommits();
- ResponseBuilder response = Response.ok();
- return response.build();
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- } finally {
- if (table != null) {
- pool.putTable(table);
- }
- }
- }
-
- // This currently supports only update of one row at a time.
- Response updateBinary(final byte[] message, final HttpHeaders headers,
- final boolean replace) {
- servlet.getMetrics().incrementRequests(1);
- HTablePool pool = servlet.getTablePool();
- HTableInterface table = null;
- try {
- byte[] row = rowspec.getRow();
- byte[][] columns = rowspec.getColumns();
- byte[] column = null;
- if (columns != null) {
- column = columns[0];
- }
- long timestamp = HConstants.LATEST_TIMESTAMP;
- List<String> vals = headers.getRequestHeader("X-Row");
- if (vals != null && !vals.isEmpty()) {
- row = Bytes.toBytes(vals.get(0));
- }
- vals = headers.getRequestHeader("X-Column");
- if (vals != null && !vals.isEmpty()) {
- column = Bytes.toBytes(vals.get(0));
- }
- vals = headers.getRequestHeader("X-Timestamp");
- if (vals != null && !vals.isEmpty()) {
- timestamp = Long.valueOf(vals.get(0));
- }
- if (column == null) {
- throw new WebApplicationException(Response.Status.BAD_REQUEST);
- }
- Put put = new Put(row);
- byte parts[][] = KeyValue.parseColumn(column);
- if (parts.length == 2 && parts[1].length > 0) {
- put.add(parts[0], parts[1], timestamp, message);
- } else {
- put.add(parts[0], null, timestamp, message);
- }
- table = pool.getTable(actualTableName);
- table.put(put);
- if (LOG.isDebugEnabled()) {
- LOG.debug("PUT " + put.toString());
- }
- table.flushCommits();
- return Response.ok().build();
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- } finally {
- if (table != null) {
- pool.putTable(table);
- }
- }
- }
-
- @PUT
- @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
- public Response put(final CellSetModel model,
- final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("PUT " + uriInfo.getAbsolutePath());
- }
- return update(model, true);
- }
-
- @PUT
- @Consumes(MIMETYPE_BINARY)
- public Response putBinary(final byte[] message,
- final @Context UriInfo uriInfo, final @Context HttpHeaders headers)
- {
- if (LOG.isDebugEnabled()) {
- LOG.debug("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
- }
- return updateBinary(message, headers, true);
- }
-
- @POST
- @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
- public Response post(final CellSetModel model,
- final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("POST " + uriInfo.getAbsolutePath());
- }
- return update(model, false);
- }
-
- @POST
- @Consumes(MIMETYPE_BINARY)
- public Response postBinary(final byte[] message,
- final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY);
- }
- return updateBinary(message, headers, false);
- }
-
- @DELETE
- public Response delete(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("DELETE " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- Delete delete = null;
- if (rowspec.hasTimestamp())
- delete = new Delete(rowspec.getRow(), rowspec.getTimestamp(), null);
- else
- delete = new Delete(rowspec.getRow());
-
- for (byte[] column: rowspec.getColumns()) {
- byte[][] split = KeyValue.parseColumn(column);
- if (rowspec.hasTimestamp()) {
- if (split.length == 2 && split[1].length != 0) {
- delete.deleteColumns(split[0], split[1], rowspec.getTimestamp());
- } else {
- delete.deleteFamily(split[0], rowspec.getTimestamp());
- }
- } else {
- if (split.length == 2 && split[1].length != 0) {
- delete.deleteColumns(split[0], split[1]);
- } else {
- delete.deleteFamily(split[0]);
- }
- }
- }
- HTablePool pool = servlet.getTablePool();
- HTableInterface table = null;
- try {
- table = pool.getTable(actualTableName);
- table.delete(delete);
- if (LOG.isDebugEnabled()) {
- LOG.debug("DELETE " + delete.toString());
- }
- table.flushCommits();
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- } finally {
- if (table != null) {
- pool.putTable(table);
- }
- }
- return Response.ok().build();
- }
-
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RowResultGenerator.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RowResultGenerator.java
deleted file mode 100644
index 4e40084..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RowResultGenerator.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-import java.util.Iterator;
-import java.util.NoSuchElementException;
-
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.HTablePool;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.filter.Filter;
-
-public class RowResultGenerator extends ResultGenerator {
- private Iterator<KeyValue> valuesI;
- private KeyValue cache;
-
- public RowResultGenerator(final String tableName, final RowSpec rowspec,
- final Filter filter) throws IllegalArgumentException, IOException {
- HTablePool pool = RESTServlet.getInstance().getTablePool();
- HTableInterface table = pool.getTable(tableName);
- try {
- Get get = new Get(rowspec.getRow());
- if (rowspec.hasColumns()) {
- for (byte[] col: rowspec.getColumns()) {
- byte[][] split = KeyValue.parseColumn(col);
- if (split.length == 2 && split[1].length != 0) {
- get.addColumn(split[0], split[1]);
- } else {
- get.addFamily(split[0]);
- }
- }
- } else {
- // rowspec does not explicitly specify columns, return them all
- for (HColumnDescriptor family:
- table.getTableDescriptor().getFamilies()) {
- get.addFamily(family.getName());
- }
- }
- get.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
- get.setMaxVersions(rowspec.getMaxVersions());
- if (filter != null) {
- get.setFilter(filter);
- }
- Result result = table.get(get);
- if (result != null && !result.isEmpty()) {
- valuesI = result.list().iterator();
- }
- } finally {
- pool.putTable(table);
- }
- }
-
- public void close() {
- }
-
- public boolean hasNext() {
- if (cache != null) {
- return true;
- }
- if (valuesI == null) {
- return false;
- }
- return valuesI.hasNext();
- }
-
- public KeyValue next() {
- if (cache != null) {
- KeyValue kv = cache;
- cache = null;
- return kv;
- }
- if (valuesI == null) {
- return null;
- }
- try {
- return valuesI.next();
- } catch (NoSuchElementException e) {
- return null;
- }
- }
-
- public void putBack(KeyValue kv) {
- this.cache = kv;
- }
-
- public void remove() {
- throw new UnsupportedOperationException("remove not supported");
- }
-
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RowSpec.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RowSpec.java
deleted file mode 100644
index c577e79..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/RowSpec.java
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.util.Collection;
-import java.util.TreeSet;
-
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * Parses a path based row/column/timestamp specification into its component
- * elements.
- *
- *
- */
-public class RowSpec {
- public static final long DEFAULT_START_TIMESTAMP = 0;
- public static final long DEFAULT_END_TIMESTAMP = Long.MAX_VALUE;
-
- private byte[] row = HConstants.EMPTY_START_ROW;
- private byte[] endRow = null;
- private TreeSet<byte[]> columns =
- new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
- private long startTime = DEFAULT_START_TIMESTAMP;
- private long endTime = DEFAULT_END_TIMESTAMP;
- private int maxVersions = HColumnDescriptor.DEFAULT_VERSIONS;
-
- public RowSpec(String path) throws IllegalArgumentException {
- int i = 0;
- while (path.charAt(i) == '/') {
- i++;
- }
- i = parseRowKeys(path, i);
- i = parseColumns(path, i);
- i = parseTimestamp(path, i);
- }
-
- private int parseRowKeys(final String path, int i)
- throws IllegalArgumentException {
- StringBuilder startRow = new StringBuilder();
- StringBuilder endRow = null;
- try {
- char c;
- boolean doEndRow = false;
- while (i < path.length() && (c = path.charAt(i)) != '/') {
- if (c == ',') {
- doEndRow = true;
- i++;
- break;
- }
- startRow.append(c);
- i++;
- }
- i++;
- this.row = Bytes.toBytes(startRow.toString());
- if (doEndRow) {
- endRow = new StringBuilder();
- while ((c = path.charAt(i)) != '/') {
- endRow.append(c);
- i++;
- }
- i++;
- }
- } catch (IndexOutOfBoundsException e) {
- throw new IllegalArgumentException(e);
- }
- // HBase does not support wildcards on row keys so we will emulate a
- // suffix glob by synthesizing appropriate start and end row keys for
- // table scanning
- if (startRow.charAt(startRow.length() - 1) == '*') {
- if (endRow != null)
- throw new IllegalArgumentException("invalid path: start row "+
- "specified with wildcard");
- this.row = Bytes.toBytes(startRow.substring(0,
- startRow.lastIndexOf("*")));
- this.endRow = new byte[this.row.length + 1];
- System.arraycopy(this.row, 0, this.endRow, 0, this.row.length);
- this.endRow[this.row.length] = (byte)255;
- } else {
- this.row = Bytes.toBytes(startRow.toString());
- if (endRow != null) {
- this.endRow = Bytes.toBytes(endRow.toString());
- }
- }
- return i;
- }
-
- private int parseColumns(final String path, int i)
- throws IllegalArgumentException {
- if (i >= path.length()) {
- return i;
- }
- try {
- char c;
- StringBuilder column = new StringBuilder();
- boolean hasColon = false;
- while (i < path.length() && (c = path.charAt(i)) != '/') {
- if (c == ',') {
- if (column.length() < 1) {
- throw new IllegalArgumentException("invalid path");
- }
- if (!hasColon) {
- column.append(':');
- }
- this.columns.add(Bytes.toBytes(column.toString()));
- column = new StringBuilder();
- hasColon = false;
- i++;
- continue;
- }
- if (c == ':') {
- hasColon = true;
- }
- column.append(c);
- i++;
- }
- i++;
- // trailing list entry
- if (column.length() > 1) {
- if (!hasColon) {
- column.append(':');
- }
- this.columns.add(Bytes.toBytes(column.toString()));
- }
- } catch (IndexOutOfBoundsException e) {
- throw new IllegalArgumentException(e);
- }
- return i;
- }
-
- private int parseTimestamp(final String path, int i)
- throws IllegalArgumentException {
- if (i >= path.length()) {
- return i;
- }
- long time0 = 0, time1 = 0;
- try {
- char c = 0;
- StringBuilder stamp = new StringBuilder();
- while (i < path.length()) {
- c = path.charAt(i);
- if (c == '/' || c == ',') {
- break;
- }
- stamp.append(c);
- i++;
- }
- try {
- time0 = Long.valueOf(stamp.toString());
- } catch (NumberFormatException e) {
- throw new IllegalArgumentException(e);
- }
- if (c == ',') {
- stamp = new StringBuilder();
- i++;
- while (i < path.length() && ((c = path.charAt(i)) != '/')) {
- stamp.append(c);
- i++;
- }
- try {
- time1 = Long.valueOf(stamp.toString());
- } catch (NumberFormatException e) {
- throw new IllegalArgumentException(e);
- }
- }
- if (c == '/') {
- i++;
- }
- } catch (IndexOutOfBoundsException e) {
- throw new IllegalArgumentException(e);
- }
- if (time1 != 0) {
- startTime = time0;
- endTime = time1;
- } else {
- endTime = time0;
- }
- return i;
- }
-
- public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns,
- long startTime, long endTime, int maxVersions) {
- this.row = startRow;
- this.endRow = endRow;
- if (columns != null) {
- for (byte[] col: columns) {
- this.columns.add(col);
- }
- }
- this.startTime = startTime;
- this.endTime = endTime;
- this.maxVersions = maxVersions;
- }
-
- public RowSpec(byte[] startRow, byte[] endRow, Collection<byte[]> columns,
- long startTime, long endTime, int maxVersions) {
- this.row = startRow;
- this.endRow = endRow;
- if (columns != null) {
- this.columns.addAll(columns);
- }
- this.startTime = startTime;
- this.endTime = endTime;
- this.maxVersions = maxVersions;
- }
-
- public boolean isSingleRow() {
- return endRow == null;
- }
-
- public int getMaxVersions() {
- return maxVersions;
- }
-
- public void setMaxVersions(final int maxVersions) {
- this.maxVersions = maxVersions;
- }
-
- public boolean hasColumns() {
- return !columns.isEmpty();
- }
-
- public byte[] getRow() {
- return row;
- }
-
- public byte[] getStartRow() {
- return row;
- }
-
- public boolean hasEndRow() {
- return endRow != null;
- }
-
- public byte[] getEndRow() {
- return endRow;
- }
-
- public void addColumn(final byte[] column) {
- columns.add(column);
- }
-
- public byte[][] getColumns() {
- return columns.toArray(new byte[columns.size()][]);
- }
-
- public boolean hasTimestamp() {
- return (startTime == 0) && (endTime != Long.MAX_VALUE);
- }
-
- public long getTimestamp() {
- return endTime;
- }
-
- public long getStartTime() {
- return startTime;
- }
-
- public void setStartTime(final long startTime) {
- this.startTime = startTime;
- }
-
- public long getEndTime() {
- return endTime;
- }
-
- public void setEndTime(long endTime) {
- this.endTime = endTime;
- }
-
- public String toString() {
- StringBuilder result = new StringBuilder();
- result.append("{startRow => '");
- if (row != null) {
- result.append(Bytes.toString(row));
- }
- result.append("', endRow => '");
- if (endRow != null) {
- result.append(Bytes.toString(endRow));
- }
- result.append("', columns => [");
- for (byte[] col: columns) {
- result.append(" '");
- result.append(Bytes.toString(col));
- result.append("'");
- }
- result.append(" ], startTime => ");
- result.append(Long.toString(startTime));
- result.append(", endTime => ");
- result.append(Long.toString(endTime));
- result.append(", maxVersions => ");
- result.append(Integer.toString(maxVersions));
- result.append("}");
- return result.toString();
- }
-
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ScannerInstanceResource.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ScannerInstanceResource.java
deleted file mode 100644
index 92a5b7e..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ScannerInstanceResource.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.stargate.model.CellModel;
-import org.apache.hadoop.hbase.stargate.model.CellSetModel;
-import org.apache.hadoop.hbase.stargate.model.RowModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import com.sun.jersey.core.util.Base64;
-
-public class ScannerInstanceResource implements Constants {
- private static final Log LOG =
- LogFactory.getLog(ScannerInstanceResource.class);
-
- User user;
- ResultGenerator generator;
- String id;
- int batch;
- RESTServlet servlet;
- CacheControl cacheControl;
-
- public ScannerInstanceResource(User user, String table, String id,
- ResultGenerator generator, int batch) throws IOException {
- this.user = user;
- this.id = id;
- this.generator = generator;
- this.batch = batch;
- servlet = RESTServlet.getInstance();
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- }
-
- @GET
- @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
- public Response get(final @Context UriInfo uriInfo) throws IOException {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- CellSetModel model = new CellSetModel();
- RowModel rowModel = null;
- byte[] rowKey = null;
- int count = batch;
- do {
- KeyValue value = null;
- try {
- value = generator.next();
- } catch (IllegalStateException e) {
- ScannerResource.delete(id);
- throw new WebApplicationException(Response.Status.GONE);
- }
- if (value == null) {
- LOG.info("generator exhausted");
- // respond with 204 (No Content) if an empty cell set would be
- // returned
- if (count == batch) {
- return Response.noContent().build();
- }
- break;
- }
- if (rowKey == null) {
- rowKey = value.getRow();
- rowModel = new RowModel(rowKey);
- }
- if (!Bytes.equals(value.getRow(), rowKey)) {
- // the user request limit is a transaction limit, so we need to
- // account for scanner.next()
- if (user != null && !servlet.userRequestLimit(user, 1)) {
- generator.putBack(value);
- break;
- }
- model.addRow(rowModel);
- rowKey = value.getRow();
- rowModel = new RowModel(rowKey);
- }
- rowModel.addCell(
- new CellModel(value.getFamily(), value.getQualifier(),
- value.getTimestamp(), value.getValue()));
- } while (--count > 0);
- model.addRow(rowModel);
- ResponseBuilder response = Response.ok(model);
- response.cacheControl(cacheControl);
- return response.build();
- }
-
- @GET
- @Produces(MIMETYPE_BINARY)
- public Response getBinary(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath() + " as " +
- MIMETYPE_BINARY);
- }
- servlet.getMetrics().incrementRequests(1);
- try {
- KeyValue value = generator.next();
- if (value == null) {
- LOG.info("generator exhausted");
- return Response.noContent().build();
- }
- ResponseBuilder response = Response.ok(value.getValue());
- response.cacheControl(cacheControl);
- response.header("X-Row", Base64.encode(value.getRow()));
- response.header("X-Column",
- Base64.encode(
- KeyValue.makeColumn(value.getFamily(), value.getQualifier())));
- response.header("X-Timestamp", value.getTimestamp());
- return response.build();
- } catch (IllegalStateException e) {
- ScannerResource.delete(id);
- throw new WebApplicationException(Response.Status.GONE);
- }
- }
-
- @DELETE
- public Response delete(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("DELETE " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- ScannerResource.delete(id);
- return Response.ok().build();
- }
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResource.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResource.java
deleted file mode 100644
index 41c71a3..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResource.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.HashMap;
-import java.util.Map;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriBuilder;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.filter.Filter;
-
-import org.apache.hadoop.hbase.stargate.User;
-import org.apache.hadoop.hbase.stargate.model.ScannerModel;
-
-public class ScannerResource implements Constants {
-
- private static final Log LOG = LogFactory.getLog(ScannerResource.class);
-
- static final Map<String, ScannerInstanceResource> scanners =
- new HashMap<String, ScannerInstanceResource>();
-
- User user;
- String tableName;
- String actualTableName;
- RESTServlet servlet;
-
- public ScannerResource(User user, String table) throws IOException {
- if (user != null) {
- this.user = user;
- this.actualTableName =
- !user.isAdmin() ? user.getName() + "." + table : table;
- } else {
- this.actualTableName = table;
- }
- this.tableName = table;
- servlet = RESTServlet.getInstance();
- }
-
- static void delete(final String id) {
- synchronized (scanners) {
- ScannerInstanceResource instance = scanners.remove(id);
- if (instance != null) {
- instance.generator.close();
- }
- }
- }
-
- Response update(final ScannerModel model, final boolean replace,
- final UriInfo uriInfo) {
- servlet.getMetrics().incrementRequests(1);
- byte[] endRow = model.hasEndRow() ? model.getEndRow() : null;
- RowSpec spec = new RowSpec(model.getStartRow(), endRow,
- model.getColumns(), model.getStartTime(), model.getEndTime(), 1);
- try {
- Filter filter = ScannerResultGenerator.buildFilterFromModel(model);
- ScannerResultGenerator gen =
- new ScannerResultGenerator(actualTableName, spec, filter);
- String id = gen.getID();
- ScannerInstanceResource instance =
- new ScannerInstanceResource(user, actualTableName, id, gen,
- model.getBatch());
- synchronized (scanners) {
- scanners.put(id, instance);
- }
- if (LOG.isDebugEnabled()) {
- LOG.debug("new scanner: " + id);
- }
- UriBuilder builder = uriInfo.getAbsolutePathBuilder();
- URI uri = builder.path(id).build();
- return Response.created(uri).build();
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- } catch (Exception e) {
- throw new WebApplicationException(e, Response.Status.BAD_REQUEST);
- }
- }
-
- @PUT
- @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
- public Response put(final ScannerModel model,
- final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("PUT " + uriInfo.getAbsolutePath());
- }
- return update(model, true, uriInfo);
- }
-
- @POST
- @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
- public Response post(final ScannerModel model,
- final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("POST " + uriInfo.getAbsolutePath());
- }
- return update(model, false, uriInfo);
- }
-
- @Path("{scanner: .+}")
- public ScannerInstanceResource getScannerInstanceResource(
- final @PathParam("scanner") String id) {
- synchronized (scanners) {
- ScannerInstanceResource instance = scanners.get(id);
- if (instance == null) {
- throw new WebApplicationException(Response.Status.NOT_FOUND);
- }
- return instance;
- }
- }
-
-}
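
Taken together with ScannerInstanceResource above, the scanner protocol looks roughly like this from a client's point of view. The sketch leans on the Client class removed later in this patch; the table name, the use of createProtobufOutput() as the serializer, and the handling of the Location header are illustrative assumptions rather than verbatim API:

  // Create a scanner: PUT (or POST) a serialized ScannerModel to
  // /<table>/scanner; a 201 Created response carries the new scanner
  // instance URI in its Location header.
  byte[] body = scannerModel.createProtobufOutput();  // assumed serializer
  Response created = client.put("/mytable/scanner", MIMETYPE_PROTOBUF, body);
  // Read cells: GET the scanner instance URI repeatedly. Each call returns
  // at most 'batch' cells as a CellSetModel; 204 No Content means the
  // scanner is exhausted. A DELETE on the same URI (handled by
  // ScannerInstanceResource.delete()) releases the server-side scanner.
  Response cells = client.get("/mytable/scanner/" + scannerId, MIMETYPE_PROTOBUF);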
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResultGenerator.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResultGenerator.java
deleted file mode 100644
index b2f25d8..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/ScannerResultGenerator.java
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.HTablePool;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.stargate.model.ScannerModel;
-import org.apache.hadoop.util.StringUtils;
-
-public class ScannerResultGenerator extends ResultGenerator {
-
- private static final Log LOG =
- LogFactory.getLog(ScannerResultGenerator.class);
-
- public static Filter buildFilterFromModel(final ScannerModel model)
- throws Exception {
- String filter = model.getFilter();
- if (filter == null || filter.length() == 0) {
- return null;
- }
- return buildFilter(filter);
- }
-
- private String id;
- private Iterator<KeyValue> rowI;
- private KeyValue cache;
- private ResultScanner scanner;
- private Result cached;
-
- public ScannerResultGenerator(final String tableName, final RowSpec rowspec,
- final Filter filter) throws IllegalArgumentException, IOException {
- HTablePool pool = RESTServlet.getInstance().getTablePool();
- HTableInterface table = pool.getTable(tableName);
- try {
- Scan scan;
- if (rowspec.hasEndRow()) {
- scan = new Scan(rowspec.getStartRow(), rowspec.getEndRow());
- } else {
- scan = new Scan(rowspec.getStartRow());
- }
- if (rowspec.hasColumns()) {
- byte[][] columns = rowspec.getColumns();
- for (byte[] column: columns) {
- byte[][] split = KeyValue.parseColumn(column);
- if (split.length > 1 && (split[1] != null && split[1].length != 0)) {
- scan.addColumn(split[0], split[1]);
- } else {
- scan.addFamily(split[0]);
- }
- }
- } else {
- for (HColumnDescriptor family:
- table.getTableDescriptor().getFamilies()) {
- scan.addFamily(family.getName());
- }
- }
- scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime());
- scan.setMaxVersions(rowspec.getMaxVersions());
- if (filter != null) {
- scan.setFilter(filter);
- }
- // always disable block caching on the cluster when scanning
- scan.setCacheBlocks(false);
- scanner = table.getScanner(scan);
- cached = null;
- id = Long.toString(System.currentTimeMillis()) +
- Integer.toHexString(scanner.hashCode());
- } finally {
- pool.putTable(table);
- }
- }
-
- public String getID() {
- return id;
- }
-
- public void close() {
- }
-
- public boolean hasNext() {
- if (cache != null) {
- return true;
- }
- if (rowI != null && rowI.hasNext()) {
- return true;
- }
- if (cached != null) {
- return true;
- }
- try {
- Result result = scanner.next();
- if (result != null && !result.isEmpty()) {
- cached = result;
- }
- } catch (UnknownScannerException e) {
- throw new IllegalArgumentException(e);
- } catch (IOException e) {
- LOG.error(StringUtils.stringifyException(e));
- }
- return cached != null;
- }
-
- public KeyValue next() {
- if (cache != null) {
- KeyValue kv = cache;
- cache = null;
- return kv;
- }
- boolean loop;
- do {
- loop = false;
- if (rowI != null) {
- if (rowI.hasNext()) {
- return rowI.next();
- } else {
- rowI = null;
- }
- }
- if (cached != null) {
- rowI = cached.list().iterator();
- loop = true;
- cached = null;
- } else {
- Result result = null;
- try {
- result = scanner.next();
- } catch (UnknownScannerException e) {
- throw new IllegalArgumentException(e);
- } catch (IOException e) {
- LOG.error(StringUtils.stringifyException(e));
- }
- if (result != null && !result.isEmpty()) {
- rowI = result.list().iterator();
- loop = true;
- }
- }
- } while (loop);
- return null;
- }
-
- public void putBack(KeyValue kv) {
- this.cache = kv;
- }
-
- public void remove() {
- throw new UnsupportedOperationException("remove not supported");
- }
-
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/SchemaResource.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/SchemaResource.java
deleted file mode 100644
index f84f609..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/SchemaResource.java
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-import java.util.Map;
-
-import javax.ws.rs.Consumes;
-import javax.ws.rs.DELETE;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.PUT;
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.xml.namespace.QName;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.HTablePool;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.stargate.User;
-import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;
-import org.apache.hadoop.hbase.stargate.model.TableSchemaModel;
-import org.apache.hadoop.hbase.util.Bytes;
-
-public class SchemaResource implements Constants {
- private static final Log LOG = LogFactory.getLog(SchemaResource.class);
-
- User user;
- String tableName;
- String actualTableName;
- CacheControl cacheControl;
- RESTServlet servlet;
-
- public SchemaResource(User user, String table) throws IOException {
- if (user != null) {
- this.user = user;
- this.actualTableName =
- !user.isAdmin() ? (user.getName() + "." + table) : table;
- } else {
- this.actualTableName = table;
- }
- this.tableName = table;
- servlet = RESTServlet.getInstance();
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- }
-
- private HTableDescriptor getTableSchema() throws IOException,
- TableNotFoundException {
- HTablePool pool = servlet.getTablePool();
- HTableInterface table = pool.getTable(actualTableName);
- try {
- return table.getTableDescriptor();
- } finally {
- pool.putTable(table);
- }
- }
-
- @GET
- @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
- public Response get(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- try {
- HTableDescriptor htd = getTableSchema();
- TableSchemaModel model = new TableSchemaModel();
- model.setName(tableName);
- for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
- htd.getValues().entrySet()) {
- model.addAttribute(Bytes.toString(e.getKey().get()),
- Bytes.toString(e.getValue().get()));
- }
- for (HColumnDescriptor hcd: htd.getFamilies()) {
- ColumnSchemaModel columnModel = new ColumnSchemaModel();
- columnModel.setName(hcd.getNameAsString());
- for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
- hcd.getValues().entrySet()) {
- columnModel.addAttribute(Bytes.toString(e.getKey().get()),
- Bytes.toString(e.getValue().get()));
- }
- model.addColumnFamily(columnModel);
- }
- ResponseBuilder response = Response.ok(model);
- response.cacheControl(cacheControl);
- return response.build();
- } catch (TableNotFoundException e) {
- throw new WebApplicationException(Response.Status.NOT_FOUND);
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- }
- }
-
- private Response replace(final byte[] tableName,
- final TableSchemaModel model, final UriInfo uriInfo,
- final HBaseAdmin admin) {
- try {
- HTableDescriptor htd = new HTableDescriptor(tableName);
- for (Map.Entry<QName, Object> e: model.getAny().entrySet()) {
- htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
- }
- for (ColumnSchemaModel family: model.getColumns()) {
- HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
- for (Map.Entry<QName, Object> e: family.getAny().entrySet()) {
- hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
- }
- htd.addFamily(hcd);
- }
- if (admin.tableExists(tableName)) {
- admin.disableTable(tableName);
- admin.modifyTable(tableName, htd);
- admin.enableTable(tableName);
- } else try {
- admin.createTable(htd);
- } catch (TableExistsException e) {
- // race, someone else created a table with the same name
- throw new WebApplicationException(e, Response.Status.NOT_MODIFIED);
- }
- return Response.created(uriInfo.getAbsolutePath()).build();
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- }
- }
-
- private Response update(final byte[] tableName, final TableSchemaModel model,
- final UriInfo uriInfo, final HBaseAdmin admin) {
- try {
- HTableDescriptor htd = admin.getTableDescriptor(tableName);
- admin.disableTable(tableName);
- try {
- for (ColumnSchemaModel family: model.getColumns()) {
- HColumnDescriptor hcd = new HColumnDescriptor(family.getName());
- for (Map.Entry<QName, Object> e: family.getAny().entrySet()) {
- hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
- }
- if (htd.hasFamily(hcd.getName())) {
- admin.modifyColumn(tableName, hcd.getName(), hcd);
- } else {
- admin.addColumn(tableName, hcd);
- }
- }
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.INTERNAL_SERVER_ERROR);
- } finally {
- admin.enableTable(tableName);
- }
- return Response.ok().build();
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- }
- }
-
- private Response update(final TableSchemaModel model, final boolean replace,
- final UriInfo uriInfo) {
- try {
- servlet.invalidateMaxAge(tableName);
- byte[] tableName = Bytes.toBytes(actualTableName);
- HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
- if (replace || !admin.tableExists(tableName)) {
- return replace(tableName, model, uriInfo, admin);
- } else {
- return update(tableName, model, uriInfo, admin);
- }
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- }
- }
-
- @PUT
- @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
- public Response put(final TableSchemaModel model,
- final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("PUT " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- // use the name given in the path, but warn if the name on the path and
- // the name in the schema are different
- if (!tableName.equals(model.getName())) {
- LOG.warn("table name mismatch: path='" + tableName + "', schema='" +
- model.getName() + "'");
- }
- return update(model, true, uriInfo);
- }
-
- @POST
- @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
- public Response post(final TableSchemaModel model,
- final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("PUT " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- // use the name given in the path, but warn if the name on the path and
- // the name in the schema are different
- if (!tableName.equals(model.getName())) {
- LOG.warn("table name mismatch: path='" + tableName + "', schema='" +
- model.getName() + "'");
- }
- return update(model, false, uriInfo);
- }
-
- @DELETE
- public Response delete(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("DELETE " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- try {
- HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
- admin.disableTable(actualTableName);
- admin.deleteTable(actualTableName);
- return Response.ok().build();
- } catch (TableNotFoundException e) {
- throw new WebApplicationException(Response.Status.NOT_FOUND);
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- }
- }
-
-}
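
The PUT and POST handlers above consume a TableSchemaModel. A small sketch of building one on the client side, using only the model methods already exercised by get() above; the family name and attribute values are illustrative:

  TableSchemaModel schema = new TableSchemaModel();
  schema.setName("mytable");
  ColumnSchemaModel family = new ColumnSchemaModel();
  family.setName("info");
  // Arbitrary attributes are copied into HColumnDescriptor values by
  // replace()/update() above, e.g. VERSIONS.
  family.addAttribute("VERSIONS", "3");
  schema.addColumnFamily(family);
  // PUT the serialized model to /<table>/schema to create or replace the
  // table; POST alters an existing table in place.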
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/StorageClusterStatusResource.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/StorageClusterStatusResource.java
deleted file mode 100644
index 494b44c..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/StorageClusterStatusResource.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.ws.rs.core.UriInfo;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.HServerInfo;
-import org.apache.hadoop.hbase.HServerLoad;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.stargate.model.StorageClusterStatusModel;
-
-public class StorageClusterStatusResource implements Constants {
- private static final Log LOG =
- LogFactory.getLog(StorageClusterStatusResource.class);
-
- private CacheControl cacheControl;
- private RESTServlet servlet;
-
- public StorageClusterStatusResource() throws IOException {
- servlet = RESTServlet.getInstance();
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- }
-
- @GET
- @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
- public Response get(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- try {
- HBaseAdmin admin = new HBaseAdmin(servlet.getConfiguration());
- ClusterStatus status = admin.getClusterStatus();
- StorageClusterStatusModel model = new StorageClusterStatusModel();
- model.setRegions(status.getRegionsCount());
- model.setRequests(status.getRequestsCount());
- model.setAverageLoad(status.getAverageLoad());
- for (HServerInfo info: status.getServerInfo()) {
- HServerLoad load = info.getLoad();
- StorageClusterStatusModel.Node node =
- model.addLiveNode(
- info.getServerAddress().getHostname() + ":" +
- Integer.toString(info.getServerAddress().getPort()),
- info.getStartCode(), load.getUsedHeapMB(),
- load.getMaxHeapMB());
- node.setRequests(load.getNumberOfRequests());
- for (HServerLoad.RegionLoad region: load.getRegionsLoad()) {
- node.addRegion(region.getName(), region.getStores(),
- region.getStorefiles(), region.getStorefileSizeMB(),
- region.getMemStoreSizeMB(), region.getStorefileIndexSizeMB());
- }
- }
- for (String name: status.getDeadServerNames()) {
- model.addDeadNode(name);
- }
- ResponseBuilder response = Response.ok(model);
- response.cacheControl(cacheControl);
- return response.build();
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- }
- }
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/StorageClusterVersionResource.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/StorageClusterVersionResource.java
deleted file mode 100644
index 51c84f0..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/StorageClusterVersionResource.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Produces;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.stargate.model.StorageClusterVersionModel;
-
-public class StorageClusterVersionResource implements Constants {
- private static final Log LOG =
- LogFactory.getLog(StorageClusterVersionResource.class);
-
- private CacheControl cacheControl;
- private RESTServlet servlet;
-
- public StorageClusterVersionResource() throws IOException {
- servlet = RESTServlet.getInstance();
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- }
-
- @GET
- @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON})
- public Response get(final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- Configuration conf = servlet.getConfiguration();
- try {
- HBaseAdmin admin = new HBaseAdmin(conf);
- StorageClusterVersionModel model = new StorageClusterVersionModel();
- model.setVersion(admin.getClusterStatus().getHBaseVersion());
- ResponseBuilder response = Response.ok(model);
- response.cacheControl(cacheControl);
- return response.build();
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.SERVICE_UNAVAILABLE);
- }
- }
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/TableResource.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/TableResource.java
deleted file mode 100644
index eebaf45..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/TableResource.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.WebApplicationException;
-import javax.ws.rs.core.Response;
-
-import org.apache.hadoop.hbase.stargate.User;
-
-public class TableResource implements Constants {
-
- User user;
- String table;
-
- public TableResource(User user, String table) {
- this.user = user;
- this.table = table;
- }
-
- @Path("regions")
- public RegionsResource getRegionsResource() throws IOException {
- return new RegionsResource(user, table);
- }
-
- @Path("scanner")
- public ScannerResource getScannerResource() throws IOException {
- return new ScannerResource(user, table);
- }
-
- @Path("schema")
- public SchemaResource getSchemaResource() throws IOException {
- return new SchemaResource(user, table);
- }
-
- @Path("{rowspec: .+}")
- public RowResource getRowResource(
- final @PathParam("rowspec") String rowspec,
- final @QueryParam("v") String versions) {
- try {
- return new RowResource(user, table, rowspec, versions);
- } catch (IOException e) {
- throw new WebApplicationException(e,
- Response.Status.INTERNAL_SERVER_ERROR);
- }
- }
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/User.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/User.java
deleted file mode 100644
index b317c33..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/User.java
+++ /dev/null
@@ -1,155 +0,0 @@
-package org.apache.hadoop.hbase.stargate;
-
-import java.security.MessageDigest;
-
-import org.apache.hadoop.hbase.util.Bytes;
-
-/** Representation of an authorized user */
-public class User implements Constants {
-
- public static final User DEFAULT_USER = new User("default",
- "00000000000000000000000000000000", false, true);
-
- private String name;
- private String token;
- private boolean admin;
- private boolean disabled = false;
-
- /**
- * Constructor
- *
- * Creates an access token. (Normally, you don't want this.)
- * @param name user name
- * @param admin true if user has administrator privilege
- * @throws Exception
- */
- public User(String name, boolean admin) throws Exception {
- this.name = name;
- this.admin = admin;
- byte[] digest = MessageDigest.getInstance("MD5")
- .digest(Bytes.toBytes(name));
- StringBuffer sb = new StringBuffer();
- for (int i = 0; i < digest.length; i++) {
- sb.append(String.format("%02x", digest[i] & 0xff));
- }
- this.token = sb.toString();
- }
-
- /**
- * Constructor
- * @param name user name
- * @param token access token, a 32 character hex string
- * @param admin true if user has administrator privilege
- */
- public User(String name, String token, boolean admin) {
- this(name, token, admin, false);
- }
-
- /**
- * Constructor
- * @param name user name
- * @param token access token, a 32 character hex string
- * @param admin true if user has administrator privilege
- * @param disabled true if user is disabled
- */
- public User(String name, String token, boolean admin, boolean disabled) {
- this.name = name;
- this.token = token;
- this.admin = admin;
- this.disabled = disabled;
- }
-
- /**
- * @return user name
- */
- public String getName() {
- return name;
- }
-
- /**
- * @param name user name
- */
- public void setName(final String name) {
- this.name = name;
- }
-
- /**
- * @return access token, a 32 character hex string
- */
- public String getToken() {
- return token;
- }
-
- /**
- * @param token access token, a 32 character hex string
- */
- public void setToken(final String token) {
- this.token = token;
- }
-
- /**
- * @return true if user has administrator privilege
- */
- public boolean isAdmin() {
- return admin;
- }
-
- /**
- * @param admin true if user has administrator privilege
- */
- public void setAdmin(final boolean admin) {
- this.admin = admin;
- }
-
- /**
- * @return true if user is disabled
- */
- public boolean isDisabled() {
- return disabled;
- }
-
- /**
- * @param disabled true if user is disabled
- */
- public void setDisabled(boolean disabled) {
- this.disabled = disabled;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + (admin ? 1231 : 1237);
- result = prime * result + (disabled ? 1231 : 1237);
- result = prime * result + ((name == null) ? 0 : name.hashCode());
- result = prime * result + ((token == null) ? 0 : token.hashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (obj == null)
- return false;
- if (getClass() != obj.getClass())
- return false;
- User other = (User) obj;
- if (admin != other.admin)
- return false;
- if (disabled != other.disabled)
- return false;
- if (name == null) {
- if (other.name != null)
- return false;
- } else if (!name.equals(other.name))
- return false;
- if (token == null) {
- if (other.token != null)
- return false;
- } else if (!token.equals(other.token))
- return false;
- return true;
- }
-
-}
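
A short illustration of the two token paths the constructors above provide: a token derived from an MD5 digest of the user name, or an externally assigned one. The values shown are placeholders:

  // Derived token: MD5 of the name, rendered as lower-case hex.
  User alice = new User("alice", false);
  String token = alice.getToken();   // hex digest of "alice"
  // Externally assigned token, e.g. as stored by one of the authenticators.
  User admin = new User("admin", "00000000000000000000000000000001", true);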
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/VersionResource.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/VersionResource.java
deleted file mode 100644
index a778278..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/VersionResource.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate;
-
-import java.io.IOException;
-
-import javax.servlet.ServletContext;
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.CacheControl;
-import javax.ws.rs.core.Context;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import javax.ws.rs.core.Response.ResponseBuilder;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.stargate.model.VersionModel;
-
-/**
- * Implements Stargate software version reporting via
- *
- * /version/stargate
- *
- * /version (alias for /version/stargate )
- */
-public class VersionResource implements Constants {
- private static final Log LOG = LogFactory.getLog(VersionResource.class);
-
- private CacheControl cacheControl;
- private RESTServlet servlet;
-
- public VersionResource() throws IOException {
- servlet = RESTServlet.getInstance();
- cacheControl = new CacheControl();
- cacheControl.setNoCache(true);
- cacheControl.setNoTransform(false);
- }
-
- /**
- * Build a response for a version request.
- * @param context servlet context
- * @param uriInfo (JAX-RS context variable) request URL
- * @return a response for a version request
- */
- @GET
- @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF})
- public Response get(final @Context ServletContext context,
- final @Context UriInfo uriInfo) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("GET " + uriInfo.getAbsolutePath());
- }
- servlet.getMetrics().incrementRequests(1);
- ResponseBuilder response = Response.ok(new VersionModel(context));
- response.cacheControl(cacheControl);
- return response.build();
- }
-
- /**
- * Dispatch to StorageClusterVersionResource
- */
- @Path("cluster")
- public StorageClusterVersionResource getClusterVersionResource()
- throws IOException {
- return new StorageClusterVersionResource();
- }
-
- /**
- * Dispatch /version/stargate to self.
- */
- @Path("stargate")
- public VersionResource getVersionResource() {
- return this;
- }
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/Authenticator.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/Authenticator.java
deleted file mode 100644
index dcba2c8..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/Authenticator.java
+++ /dev/null
@@ -1,11 +0,0 @@
-package org.apache.hadoop.hbase.stargate.auth;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.stargate.User;
-
-public abstract class Authenticator {
-
- public abstract User getUserForToken(String token) throws IOException;
-
-}
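
The contract is a single lookup from token to User, with null signalling an unknown token. A minimal illustrative implementation, not part of the patch, might back it with an in-memory map:

  public class MapAuthenticator extends Authenticator {
    private final java.util.concurrent.ConcurrentHashMap<String, User> users =
      new java.util.concurrent.ConcurrentHashMap<String, User>();

    public void addUser(String token, User user) {
      users.put(token, user);
    }

    @Override
    public User getUserForToken(String token) {
      return users.get(token);  // null signals an unknown token
    }
  }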
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/HBCAuthenticator.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/HBCAuthenticator.java
deleted file mode 100644
index 8248bd3..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/HBCAuthenticator.java
+++ /dev/null
@@ -1,39 +0,0 @@
-package org.apache.hadoop.hbase.stargate.auth;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.stargate.User;
-
-public class HBCAuthenticator extends Authenticator {
-
- Configuration conf;
-
- /**
- * Default constructor
- */
- public HBCAuthenticator() {
- this(HBaseConfiguration.create());
- }
-
- /**
- * Constructor
- * @param conf
- */
- public HBCAuthenticator(Configuration conf) {
- this.conf = conf;
- }
-
- @Override
- public User getUserForToken(String token) {
- String name = conf.get("stargate.auth.token." + token);
- if (name == null) {
- return null;
- }
- boolean admin = conf.getBoolean("stargate.auth.user." + name + ".admin",
- false);
- boolean disabled = conf.getBoolean("stargate.auth.user." + name + ".disabled",
- false);
- return new User(name, token, admin, disabled);
- }
-
-}
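
getUserForToken() above resolves everything from the site configuration. A sketch of the keys involved, set programmatically here for illustration (the token and user name are placeholders):

  Configuration conf = HBaseConfiguration.create();
  // token -> user name
  conf.set("stargate.auth.token.0123456789abcdef0123456789abcdef", "alice");
  // per-user flags, both default to false when absent
  conf.setBoolean("stargate.auth.user.alice.admin", true);
  conf.setBoolean("stargate.auth.user.alice.disabled", false);
  HBCAuthenticator auth = new HBCAuthenticator(conf);
  User alice = auth.getUserForToken("0123456789abcdef0123456789abcdef");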
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/HTableAuthenticator.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/HTableAuthenticator.java
deleted file mode 100644
index e1cfeb6..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/HTableAuthenticator.java
+++ /dev/null
@@ -1,90 +0,0 @@
-package org.apache.hadoop.hbase.stargate.auth;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.stargate.Constants;
-import org.apache.hadoop.hbase.stargate.User;
-import org.apache.hadoop.hbase.util.Bytes;
-
-public class HTableAuthenticator extends Authenticator implements Constants {
-
- static final byte[] USER = Bytes.toBytes("user");
- static final byte[] NAME = Bytes.toBytes("name");
- static final byte[] ADMIN = Bytes.toBytes("admin");
- static final byte[] DISABLED = Bytes.toBytes("disabled");
-
- Configuration conf;
- String tableName;
- HTable table;
-
- /**
- * Default constructor
- */
- public HTableAuthenticator() {
- this(HBaseConfiguration.create());
- }
-
- /**
- * Constructor
- * @param conf
- */
- public HTableAuthenticator(Configuration conf) {
- this(conf, conf.get("stargate.auth.htable.name", USERS_TABLE));
- }
-
- /**
- * Constructor
- * @param conf
- * @param tableName
- */
- public HTableAuthenticator(Configuration conf, String tableName) {
- this.conf = conf;
- this.tableName = tableName;
- }
-
- /**
- * Constructor
- * @param conf
- * @param table
- */
- public HTableAuthenticator(Configuration conf, HTable table) {
- this.conf = conf;
- this.table = table;
- this.tableName = Bytes.toString(table.getTableName());
- }
-
- @Override
- public User getUserForToken(String token) throws IOException {
- if (table == null) {
- this.table = new HTable(conf, tableName);
- }
- Get get = new Get(Bytes.toBytes(token));
- get.addColumn(USER, NAME);
- get.addColumn(USER, ADMIN);
- get.addColumn(USER, DISABLED);
- Result result = table.get(get);
- byte[] value = result.getValue(USER, NAME);
- if (value == null) {
- return null;
- }
- String name = Bytes.toString(value);
- boolean admin = false;
- value = result.getValue(USER, ADMIN);
- if (value != null) {
- admin = Bytes.toBoolean(value);
- }
- boolean disabled = false;
- value = result.getValue(USER, DISABLED);
- if (value != null) {
- disabled = Bytes.toBoolean(value);
- }
- return new User(name, token, admin, disabled);
- }
-
-}
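
The reader above expects one row per token in the users table, with name, admin and disabled qualifiers under the user family. A sketch of provisioning such a row, reusing the byte[] constants defined in the class; the table name and token are placeholders (the default table name comes from Constants.USERS_TABLE):

  HTable users = new HTable(conf, "users");
  Put put = new Put(Bytes.toBytes("0123456789abcdef0123456789abcdef"));
  put.add(USER, NAME, Bytes.toBytes("alice"));
  put.add(USER, ADMIN, Bytes.toBytes(true));
  put.add(USER, DISABLED, Bytes.toBytes(false));
  users.put(put);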
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/JDBCAuthenticator.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/JDBCAuthenticator.java
deleted file mode 100644
index 7ccc464..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/JDBCAuthenticator.java
+++ /dev/null
@@ -1,88 +0,0 @@
-package org.apache.hadoop.hbase.stargate.auth;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.stargate.User;
-import org.apache.hadoop.util.StringUtils;
-
-public class JDBCAuthenticator extends Authenticator {
-
- static final Log LOG = LogFactory.getLog(JDBCAuthenticator.class);
- static final int MAX_RETRIES = 5;
- static final long RETRY_SLEEP_TIME = 1000 * 2;
-
- String url;
- String table;
- String user;
- String password;
- Connection connection;
- PreparedStatement userFetchStmt;
-
- /**
- * Constructor
- * @param conf
- */
- public JDBCAuthenticator(HBaseConfiguration conf) {
- this(conf.get("stargate.auth.jdbc.url"),
- conf.get("stargate.auth.jdbc.table"),
- conf.get("stargate.auth.jdbc.user"),
- conf.get("stargate.auth.jdbc.password"));
- }
-
- /**
- * Constructor
- * @param url
- * @param table
- * @param user
- * @param password
- */
- public JDBCAuthenticator(String url, String table, String user,
- String password) {
- this.url = url;
- this.table = table;
- this.user = user;
- this.password = password;
- }
-
- @Override
- public User getUserForToken(String token) throws IOException {
- int retries = 0;
- while (true) try {
- if (connection == null) {
- connection = DriverManager.getConnection(url, user, password);
- userFetchStmt = connection.prepareStatement(
- "SELECT name, admin, disabled FROM " + table + " WHERE token = ?");
- }
- ResultSet results;
- synchronized (userFetchStmt) {
- userFetchStmt.setString(1, token);
- results = userFetchStmt.executeQuery();
- }
- if (!results.next()) {
- return null;
- }
- return new User(results.getString(1), token, results.getBoolean(2),
- results.getBoolean(3));
- } catch (SQLException e) {
- connection = null;
- if (++retries > MAX_RETRIES) {
- throw new IOException(e);
- } else try {
- LOG.warn(StringUtils.stringifyException(e));
- Thread.sleep(RETRY_SLEEP_TIME);
- } catch (InterruptedException ex) {
- // ignore
- }
- }
- }
-
-}
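
The configuration-driven constructor reads four keys, and the backing table only needs to satisfy the SELECT used above (token, name, admin and disabled columns). Illustrative settings:

  conf.set("stargate.auth.jdbc.url", "jdbc:mysql://db.example.com/stargate");
  conf.set("stargate.auth.jdbc.table", "users");
  conf.set("stargate.auth.jdbc.user", "stargate");
  conf.set("stargate.auth.jdbc.password", "secret");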
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/ZooKeeperAuthenticator.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/ZooKeeperAuthenticator.java
deleted file mode 100644
index 77dc247..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/auth/ZooKeeperAuthenticator.java
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate.auth;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.stargate.Constants;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.stargate.User;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
-
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.data.Stat;
-
-import org.json.JSONObject;
-
-/**
- * A simple authenticator module for ZooKeeper.
- *
- * /stargate/
- * users/
- * <token>
- * Where <token> is a JSON formatted user record with the keys
- * 'name' (String, required), 'token' (String, optional), 'admin' (boolean,
- * optional), and 'disabled' (boolean, optional).
- */
-public class ZooKeeperAuthenticator extends Authenticator
- implements Constants {
-
- final String usersZNode;
- ZooKeeperWrapper wrapper;
-
- private boolean ensureParentExists(final String znode) {
- int index = znode.lastIndexOf("/");
- if (index <= 0) { // Parent is root, which always exists.
- return true;
- }
- return ensureExists(znode.substring(0, index));
- }
-
- private boolean ensureExists(final String znode) {
- ZooKeeper zk = wrapper.getZooKeeper();
- try {
- Stat stat = zk.exists(znode, false);
- if (stat != null) {
- return true;
- }
- zk.create(znode, new byte[0], Ids.OPEN_ACL_UNSAFE,
- CreateMode.PERSISTENT);
- return true;
- } catch (KeeperException.NodeExistsException e) {
- return true; // ok, move on.
- } catch (KeeperException.NoNodeException e) {
- return ensureParentExists(znode) && ensureExists(znode);
- } catch (KeeperException e) {
- } catch (InterruptedException e) {
- }
- return false;
- }
-
- /**
- * Constructor
- * @param conf
- * @throws IOException
- */
- public ZooKeeperAuthenticator(Configuration conf) throws IOException {
- this(conf, new ZooKeeperWrapper(conf, new Watcher() {
- public void process(WatchedEvent event) { }
- }));
- ensureExists(USERS_ZNODE_ROOT);
- }
-
- /**
- * Constructor
- * @param conf
- * @param wrapper
- */
- public ZooKeeperAuthenticator(Configuration conf,
- ZooKeeperWrapper wrapper) {
- this.usersZNode = conf.get("stargate.auth.zk.users", USERS_ZNODE_ROOT);
- this.wrapper = wrapper;
- }
-
- @Override
- public User getUserForToken(String token) throws IOException {
- ZooKeeper zk = wrapper.getZooKeeper();
- try {
- byte[] data = zk.getData(usersZNode + "/" + token, null, null);
- if (data == null) {
- return null;
- }
- JSONObject o = new JSONObject(Bytes.toString(data));
- if (!o.has("name")) {
- throw new IOException("invalid record, missing 'name'");
- }
- String name = o.getString("name");
- boolean admin = false;
- if (o.has("admin")) { admin = o.getBoolean("admin"); }
- boolean disabled = false;
- if (o.has("disabled")) { disabled = o.getBoolean("disabled"); }
- return new User(name, token, admin, disabled);
- } catch (KeeperException.NoNodeException e) {
- return null;
- } catch (Exception e) {
- throw new IOException(e);
- }
- }
-
-}
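
To provision a user in the shape getUserForToken() parses, one would create a child of the users znode named after the token, whose data is the JSON record described in the class comment. The path and values below are illustrative; the default root comes from Constants.USERS_ZNODE_ROOT:

  ZooKeeper zk = wrapper.getZooKeeper();
  String record = "{\"name\": \"alice\", \"admin\": true, \"disabled\": false}";
  zk.create("/stargate/users/0123456789abcdef0123456789abcdef",
      Bytes.toBytes(record), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);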
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/client/Client.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/client/Client.java
deleted file mode 100644
index 4466876..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/client/Client.java
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate.client;
-
-import java.io.IOException;
-
-import org.apache.commons.httpclient.Header;
-import org.apache.commons.httpclient.HttpClient;
-import org.apache.commons.httpclient.HttpMethod;
-import org.apache.commons.httpclient.HttpVersion;
-import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager;
-import org.apache.commons.httpclient.URI;
-import org.apache.commons.httpclient.methods.ByteArrayRequestEntity;
-import org.apache.commons.httpclient.methods.DeleteMethod;
-import org.apache.commons.httpclient.methods.GetMethod;
-import org.apache.commons.httpclient.methods.HeadMethod;
-import org.apache.commons.httpclient.methods.PostMethod;
-import org.apache.commons.httpclient.methods.PutMethod;
-import org.apache.commons.httpclient.params.HttpClientParams;
-import org.apache.commons.httpclient.params.HttpConnectionManagerParams;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * A wrapper around HttpClient which provides some useful function and
- * semantics for interacting with the Stargate REST gateway.
- */
-public class Client {
- public static final Header[] EMPTY_HEADER_ARRAY = new Header[0];
-
- private static final Log LOG = LogFactory.getLog(Client.class);
-
- private HttpClient httpClient;
- private Cluster cluster;
-
- /**
- * Default Constructor
- */
- public Client() {
- this(null);
- }
-
- /**
- * Constructor
- * @param cluster the cluster definition
- */
- public Client(Cluster cluster) {
- this.cluster = cluster;
- httpClient = new HttpClient(new MultiThreadedHttpConnectionManager());
- HttpConnectionManagerParams managerParams =
- httpClient.getHttpConnectionManager().getParams();
- managerParams.setConnectionTimeout(2000); // 2 s
- HttpClientParams clientParams = httpClient.getParams();
- clientParams.setVersion(HttpVersion.HTTP_1_1);
- }
-
- /**
- * Shut down the client. Close any open persistent connections.
- */
- public void shutdown() {
- MultiThreadedHttpConnectionManager manager =
- (MultiThreadedHttpConnectionManager) httpClient.getHttpConnectionManager();
- manager.shutdown();
- }
-
- /**
- * Execute a transaction method given only the path. Will select at random
- * one of the members of the supplied cluster definition and iterate through
- * the list until a transaction can be successfully completed. The
- * definition of success here is a complete HTTP transaction, irrespective
- * of result code.
- * @param cluster the cluster definition
- * @param method the transaction method
- * @param headers HTTP header values to send
- * @param path the path
- * @return the HTTP response code
- * @throws IOException
- */
- @SuppressWarnings("deprecation")
- public int executePathOnly(Cluster cluster, HttpMethod method,
- Header[] headers, String path) throws IOException {
- IOException lastException;
- if (cluster.nodes.size() < 1) {
- throw new IOException("Cluster is empty");
- }
- int start = (int)Math.round((cluster.nodes.size() - 1) * Math.random());
- int i = start;
- do {
- cluster.lastHost = cluster.nodes.get(i);
- try {
- StringBuilder sb = new StringBuilder();
- sb.append("http://");
- sb.append(cluster.lastHost);
- sb.append(path);
- URI uri = new URI(sb.toString());
- return executeURI(method, headers, uri.toString());
- } catch (IOException e) {
- lastException = e;
- }
- } while (++i != start && i < cluster.nodes.size());
- throw lastException;
- }
-
- /**
- * Execute a transaction method given a complete URI.
- * @param method the transaction method
- * @param headers HTTP header values to send
- * @param uri the URI
- * @return the HTTP response code
- * @throws IOException
- */
- @SuppressWarnings("deprecation")
- public int executeURI(HttpMethod method, Header[] headers, String uri)
- throws IOException {
- method.setURI(new URI(uri));
- if (headers != null) {
- for (Header header: headers) {
- method.addRequestHeader(header);
- }
- }
- long startTime = System.currentTimeMillis();
- int code = httpClient.executeMethod(method);
- long endTime = System.currentTimeMillis();
- if (LOG.isDebugEnabled()) {
- LOG.debug(method.getName() + " " + uri + ": " + code + " " +
- method.getStatusText() + " in " + (endTime - startTime) + " ms");
- }
- return code;
- }
-
- /**
- * Execute a transaction method. Will call either executePathOnly
- * or executeURI depending on whether a path only is supplied in
- * 'path', or if a complete URI is passed instead, respectively.
- * @param cluster the cluster definition
- * @param method the HTTP method
- * @param headers HTTP header values to send
- * @param path the path or URI
- * @return the HTTP response code
- * @throws IOException
- */
- public int execute(Cluster cluster, HttpMethod method, Header[] headers,
- String path) throws IOException {
- if (path.startsWith("/")) {
- return executePathOnly(cluster, method, headers, path);
- }
- return executeURI(method, headers, path);
- }
-
- /**
- * @return the cluster definition
- */
- public Cluster getCluster() {
- return cluster;
- }
-
- /**
- * @param cluster the cluster definition
- */
- public void setCluster(Cluster cluster) {
- this.cluster = cluster;
- }
-
- /**
- * Send a HEAD request
- * @param path the path or URI
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response head(String path) throws IOException {
- return head(cluster, path, null);
- }
-
- /**
- * Send a HEAD request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @param headers the HTTP headers to include in the request
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response head(Cluster cluster, String path, Header[] headers)
- throws IOException {
- HeadMethod method = new HeadMethod();
- int code = execute(cluster, method, headers, path);
- headers = method.getResponseHeaders();
- method.releaseConnection();
- return new Response(code, headers, null);
- }
-
- /**
- * Send a GET request
- * @param path the path or URI
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response get(String path) throws IOException {
- return get(cluster, path);
- }
-
- /**
- * Send a GET request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response get(Cluster cluster, String path) throws IOException {
- return get(cluster, path, EMPTY_HEADER_ARRAY);
- }
-
- /**
- * Send a GET request
- * @param path the path or URI
- * @param accept Accept header value
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response get(String path, String accept) throws IOException {
- return get(cluster, path, accept);
- }
-
- /**
- * Send a GET request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @param accept Accept header value
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response get(Cluster cluster, String path, String accept)
- throws IOException {
- Header[] headers = new Header[1];
- headers[0] = new Header("Accept", accept);
- return get(cluster, path, headers);
- }
-
- /**
- * Send a GET request
- * @param path the path or URI
- * @param headers the HTTP headers to include in the request,
- * Accept must be supplied
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response get(String path, Header[] headers) throws IOException {
- return get(cluster, path, headers);
- }
-
- /**
- * Send a GET request
- * @param c the cluster definition
- * @param path the path or URI
- * @param headers the HTTP headers to include in the request
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response get(Cluster c, String path, Header[] headers)
- throws IOException {
- GetMethod method = new GetMethod();
- int code = execute(c, method, headers, path);
- headers = method.getResponseHeaders();
- byte[] body = method.getResponseBody();
- method.releaseConnection();
- return new Response(code, headers, body);
- }
-
- /**
- * Send a PUT request
- * @param path the path or URI
- * @param contentType the content MIME type
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response put(String path, String contentType, byte[] content)
- throws IOException {
- return put(cluster, path, contentType, content);
- }
-
- /**
- * Send a PUT request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @param contentType the content MIME type
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response put(Cluster cluster, String path, String contentType,
- byte[] content) throws IOException {
- Header[] headers = new Header[1];
- headers[0] = new Header("Content-Type", contentType);
- return put(cluster, path, headers, content);
- }
-
- /**
- * Send a PUT request
- * @param path the path or URI
- * @param headers the HTTP headers to include, Content-Type must be
- * supplied
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response put(String path, Header[] headers, byte[] content)
- throws IOException {
- return put(cluster, path, headers, content);
- }
-
- /**
- * Send a PUT request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @param headers the HTTP headers to include, Content-Type must be
- * supplied
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response put(Cluster cluster, String path, Header[] headers,
- byte[] content) throws IOException {
- PutMethod method = new PutMethod();
- method.setRequestEntity(new ByteArrayRequestEntity(content));
- int code = execute(cluster, method, headers, path);
- headers = method.getResponseHeaders();
- content = method.getResponseBody();
- method.releaseConnection();
- return new Response(code, headers, content);
- }
-
- /**
- * Send a POST request
- * @param path the path or URI
- * @param contentType the content MIME type
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response post(String path, String contentType, byte[] content)
- throws IOException {
- return post(cluster, path, contentType, content);
- }
-
- /**
- * Send a POST request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @param contentType the content MIME type
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response post(Cluster cluster, String path, String contentType,
- byte[] content) throws IOException {
- Header[] headers = new Header[1];
- headers[0] = new Header("Content-Type", contentType);
- return post(cluster, path, headers, content);
- }
-
- /**
- * Send a POST request
- * @param path the path or URI
- * @param headers the HTTP headers to include, Content-Type must be
- * supplied
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response post(String path, Header[] headers, byte[] content)
- throws IOException {
- return post(cluster, path, headers, content);
- }
-
- /**
- * Send a POST request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @param headers the HTTP headers to include, Content-Type must be
- * supplied
- * @param content the content bytes
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response post(Cluster cluster, String path, Header[] headers,
- byte[] content) throws IOException {
- PostMethod method = new PostMethod();
- method.setRequestEntity(new ByteArrayRequestEntity(content));
- int code = execute(cluster, method, headers, path);
- headers = method.getResponseHeaders();
- content = method.getResponseBody();
- method.releaseConnection();
- return new Response(code, headers, content);
- }
-
- /**
- * Send a DELETE request
- * @param path the path or URI
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response delete(String path) throws IOException {
- return delete(cluster, path);
- }
-
- /**
- * Send a DELETE request
- * @param cluster the cluster definition
- * @param path the path or URI
- * @return a Response object with response detail
- * @throws IOException
- */
- public Response delete(Cluster cluster, String path) throws IOException {
- DeleteMethod method = new DeleteMethod();
- int code = execute(cluster, method, null, path);
- Header[] headers = method.getResponseHeaders();
- method.releaseConnection();
- return new Response(code, headers);
- }
-}
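A minimal usage sketch of the client API removed above, for orientation only: the Client(Cluster) constructor is assumed from the part of Client.java not shown in this hunk, and the host, port and path are illustrative.

import java.io.IOException;

import org.apache.hadoop.hbase.stargate.client.Client;
import org.apache.hadoop.hbase.stargate.client.Cluster;
import org.apache.hadoop.hbase.stargate.client.Response;

public class ClientUsageSketch {
  public static void main(String[] args) throws IOException {
    // Point the client at one or more Stargate instances.
    Cluster cluster = new Cluster().add("localhost", 8080);
    Client client = new Client(cluster); // constructor assumed, not shown in this hunk
    try {
      // GET with an explicit Accept header, via get(String, String).
      Response response = client.get("/version", "text/plain");
      if (response.getCode() == 200 && response.hasBody()) {
        System.out.println(new String(response.getBody()));
      }
    } finally {
      // Releases the underlying HTTP connection manager.
      client.shutdown();
    }
  }
}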
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/client/Cluster.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/client/Cluster.java
deleted file mode 100644
index 2264256..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/client/Cluster.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate.client;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-/**
- * A list of 'host:port' addresses of HTTP servers operating as a single
- * entity, for example multiple redundant web service gateways.
- */
-public class Cluster {
- protected List<String> nodes =
- Collections.synchronizedList(new ArrayList<String>());
- protected String lastHost;
-
- /**
- * Constructor
- */
- public Cluster() {}
-
- /**
- * Constructor
- * @param nodes a list of service locations, in 'host:port' format
- */
- public Cluster(List<String> nodes) {
- this.nodes.addAll(nodes);
- }
-
- /**
- * Add a node to the cluster
- * @param node the service location in 'host:port' format
- */
- public Cluster add(String node) {
- nodes.add(node);
- return this;
- }
-
- /**
- * Add a node to the cluster
- * @param name host name
- * @param port service port
- */
- public Cluster add(String name, int port) {
- StringBuilder sb = new StringBuilder();
- sb.append(name);
- sb.append(':');
- sb.append(port);
- return add(sb.toString());
- }
-
- /**
- * Remove a node from the cluster
- * @param node the service location in 'host:port' format
- */
- public Cluster remove(String node) {
- nodes.remove(node);
- return this;
- }
-
- /**
- * Remove a node from the cluster
- * @param name host name
- * @param port service port
- */
- public Cluster remove(String name, int port) {
- StringBuilder sb = new StringBuilder();
- sb.append(name);
- sb.append(':');
- sb.append(port);
- return remove(sb.toString());
- }
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/client/Response.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/client/Response.java
deleted file mode 100644
index 11637a4..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/client/Response.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate.client;
-
-import org.apache.commons.httpclient.Header;
-
-/**
- * The HTTP result code, response headers, and body of an HTTP response.
- */
-public class Response {
- private int code;
- private Header[] headers;
- private byte[] body;
-
- /**
- * Constructor
- * @param code the HTTP response code
- */
- public Response(int code) {
- this(code, null, null);
- }
-
- /**
- * Constructor
- * @param code the HTTP response code
- * @param headers the HTTP response headers
- */
- public Response(int code, Header[] headers) {
- this(code, headers, null);
- }
-
- /**
- * Constructor
- * @param code the HTTP response code
- * @param headers the HTTP response headers
- * @param body the response body, can be null
- */
- public Response(int code, Header[] headers, byte[] body) {
- this.code = code;
- this.headers = headers;
- this.body = body;
- }
-
- /**
- * @return the HTTP response code
- */
- public int getCode() {
- return code;
- }
-
- /**
- * @return the HTTP response headers
- */
- public Header[] getHeaders() {
- return headers;
- }
-
- /**
- * @return the value of the Location header
- */
- public String getLocation() {
- for (Header header: headers) {
- if (header.getName().equals("Location")) {
- return header.getValue();
- }
- }
- return null;
- }
-
- /**
- * @return true if a response body was sent
- */
- public boolean hasBody() {
- return body != null;
- }
-
- /**
- * @return the HTTP response body
- */
- public byte[] getBody() {
- return body;
- }
-
- /**
- * @param code the HTTP response code
- */
- public void setCode(int code) {
- this.code = code;
- }
-
- /**
- * @param headers the HTTP response headers
- */
- public void setHeaders(Header[] headers) {
- this.headers = headers;
- }
-
- /**
- * @param body the response body
- */
- public void setBody(byte[] body) {
- this.body = body;
- }
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/metrics/StargateMetrics.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/metrics/StargateMetrics.java
deleted file mode 100644
index a53988b..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/metrics/StargateMetrics.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate.metrics;
-
-import org.apache.hadoop.hbase.metrics.MetricsRate;
-
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
-import org.apache.hadoop.metrics.Updater;
-import org.apache.hadoop.metrics.jvm.JvmMetrics;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-
-public class StargateMetrics implements Updater {
- private final MetricsRecord metricsRecord;
- private final MetricsRegistry registry = new MetricsRegistry();
- private final StargateStatistics stargateStatistics;
-
- private MetricsRate requests = new MetricsRate("requests", registry);
-
- public StargateMetrics() {
- MetricsContext context = MetricsUtil.getContext("stargate");
- metricsRecord = MetricsUtil.createRecord(context, "stargate");
- String name = Thread.currentThread().getName();
- metricsRecord.setTag("Master", name);
- context.registerUpdater(this);
- JvmMetrics.init("Stargate", name);
- // expose the MBean for metrics
- stargateStatistics = new StargateStatistics(registry);
-
- }
-
- public void shutdown() {
- if (stargateStatistics != null) {
- stargateStatistics.shutdown();
- }
- }
-
- /**
- * Since this object is a registered updater, this method will be called
- * periodically, e.g. every 5 seconds.
- * @param unused
- */
- public void doUpdates(MetricsContext unused) {
- synchronized (this) {
- requests.pushMetric(metricsRecord);
- }
- this.metricsRecord.update();
- }
-
- public void resetAllMinMax() {
- // Nothing to do
- }
-
- /**
- * @return the requests metric value from the previous update interval.
- */
- public float getRequests() {
- return requests.getPreviousIntervalValue();
- }
-
- /**
- * @param inc How much to add to requests.
- */
- public void incrementRequests(final int inc) {
- requests.inc(inc);
- }
-
-}
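A hedged sketch of how the metrics hooks above are driven; in practice the servlet owns one StargateMetrics instance and bumps the counter per request, while the Hadoop metrics framework invokes doUpdates() periodically.

import org.apache.hadoop.hbase.stargate.metrics.StargateMetrics;

public class MetricsUsageSketch {
  public static void main(String[] args) {
    StargateMetrics metrics = new StargateMetrics();
    metrics.incrementRequests(1);            // once per handled request
    float previous = metrics.getRequests();  // value pushed for the previous interval
    System.out.println("previous interval: " + previous);
    metrics.shutdown();                      // unregisters the StargateStatistics MBean
  }
}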
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/metrics/StargateStatistics.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/metrics/StargateStatistics.java
deleted file mode 100644
index d3f874a..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/metrics/StargateStatistics.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate.metrics;
-
-import javax.management.ObjectName;
-
-import org.apache.hadoop.hbase.metrics.MetricsMBeanBase;
-
-import org.apache.hadoop.metrics.util.MBeanUtil;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-
-public class StargateStatistics extends MetricsMBeanBase {
- private final ObjectName mbeanName;
-
- public StargateStatistics(MetricsRegistry registry) {
- super(registry, "StargateStatistics");
- mbeanName = MBeanUtil.registerMBean("Stargate",
- "StargateStatistics", this);
- }
-
- public void shutdown() {
- if (mbeanName != null) {
- MBeanUtil.unregisterMBean(mbeanName);
- }
- }
-
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/CellModel.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/CellModel.java
deleted file mode 100644
index 284ec0f..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/CellModel.java
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate.model;
-
-import java.io.IOException;
-import java.io.Serializable;
-
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlType;
-import javax.xml.bind.annotation.XmlValue;
-
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
-import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;
-
-import com.google.protobuf.ByteString;
-
-/**
- * Representation of a cell. A cell is a single value associated with a column
- * and an optional qualifier, and either the timestamp when it was stored or
- * the user-provided timestamp if one was explicitly supplied.
- *
- *
- * <complexType name="Cell">
- * <sequence>
- * <element name="value" maxOccurs="1" minOccurs="1">
- * <simpleType>
- * <restriction base="base64Binary"/>
- * </simpleType>
- * </element>
- * </sequence>
- * <attribute name="column" type="base64Binary" />
- * <attribute name="timestamp" type="int" />
- * </complexType>
- *
- */
-@XmlRootElement(name="Cell")
-@XmlType(propOrder={"column","timestamp"})
-public class CellModel implements ProtobufMessageHandler, Serializable {
- private static final long serialVersionUID = 1L;
-
- private long timestamp = HConstants.LATEST_TIMESTAMP;
- private byte[] column;
- private byte[] value;
-
- /**
- * Default constructor
- */
- public CellModel() {}
-
- /**
- * Constructor
- * @param column
- * @param value
- */
- public CellModel(byte[] column, byte[] value) {
- this(column, HConstants.LATEST_TIMESTAMP, value);
- }
-
- /**
- * Constructor
- * @param column
- * @param qualifier
- * @param value
- */
- public CellModel(byte[] column, byte[] qualifier, byte[] value) {
- this(column, qualifier, HConstants.LATEST_TIMESTAMP, value);
- }
-
- /**
- * Constructor from KeyValue
- * @param kv
- */
- public CellModel(KeyValue kv) {
- this(kv.getFamily(), kv.getQualifier(), kv.getTimestamp(), kv.getValue());
- }
-
- /**
- * Constructor
- * @param column
- * @param timestamp
- * @param value
- */
- public CellModel(byte[] column, long timestamp, byte[] value) {
- this.column = column;
- this.timestamp = timestamp;
- this.value = value;
- }
-
- /**
- * Constructor
- * @param column
- * @param qualifier
- * @param timestamp
- * @param value
- */
- public CellModel(byte[] column, byte[] qualifier, long timestamp,
- byte[] value) {
- this.column = KeyValue.makeColumn(column, qualifier);
- this.timestamp = timestamp;
- this.value = value;
- }
-
- /**
- * @return the column
- */
- @XmlAttribute
- public byte[] getColumn() {
- return column;
- }
-
- /**
- * @param column the column to set
- */
- public void setColumn(byte[] column) {
- this.column = column;
- }
-
- /**
- * @return true if the timestamp property has been specified by the
- * user
- */
- public boolean hasUserTimestamp() {
- return timestamp != HConstants.LATEST_TIMESTAMP;
- }
-
- /**
- * @return the timestamp
- */
- @XmlAttribute
- public long getTimestamp() {
- return timestamp;
- }
-
- /**
- * @param timestamp the timestamp to set
- */
- public void setTimestamp(long timestamp) {
- this.timestamp = timestamp;
- }
-
- /**
- * @return the value
- */
- @XmlValue
- public byte[] getValue() {
- return value;
- }
-
- /**
- * @param value the value to set
- */
- public void setValue(byte[] value) {
- this.value = value;
- }
-
- @Override
- public byte[] createProtobufOutput() {
- Cell.Builder builder = Cell.newBuilder();
- builder.setColumn(ByteString.copyFrom(getColumn()));
- builder.setData(ByteString.copyFrom(getValue()));
- if (hasUserTimestamp()) {
- builder.setTimestamp(getTimestamp());
- }
- return builder.build().toByteArray();
- }
-
- @Override
- public ProtobufMessageHandler getObjectFromMessage(byte[] message)
- throws IOException {
- Cell.Builder builder = Cell.newBuilder();
- builder.mergeFrom(message);
- setColumn(builder.getColumn().toByteArray());
- setValue(builder.getData().toByteArray());
- if (builder.hasTimestamp()) {
- setTimestamp(builder.getTimestamp());
- }
- return this;
- }
-}
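A short, hedged sketch of the bean/protobuf round trip CellModel supports; the family, qualifier, timestamp and value are illustrative.

import java.io.IOException;

import org.apache.hadoop.hbase.stargate.model.CellModel;
import org.apache.hadoop.hbase.util.Bytes;

public class CellModelSketch {
  public static void main(String[] args) throws IOException {
    CellModel cell = new CellModel(Bytes.toBytes("info"), Bytes.toBytes("name"),
        1234567890L, Bytes.toBytes("value1"));
    byte[] wire = cell.createProtobufOutput();  // serialize to the protobuf Cell message
    CellModel copy = (CellModel) new CellModel().getObjectFromMessage(wire);
    // The explicit (user) timestamp survives the round trip.
    System.out.println(copy.getTimestamp() == cell.getTimestamp());
  }
}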
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/CellSetModel.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/CellSetModel.java
deleted file mode 100644
index 7b9613f..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/CellSetModel.java
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate.model;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlElement;
-
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
-import org.apache.hadoop.hbase.stargate.protobuf.generated.CellMessage.Cell;
-import org.apache.hadoop.hbase.stargate.protobuf.generated.CellSetMessage.CellSet;
-
-import com.google.protobuf.ByteString;
-
-/**
- * Representation of a grouping of cells. May contain cells from more than
- * one row. Encapsulates RowModel and CellModel models.
- *
- *
- * <complexType name="CellSet">
- * <sequence>
- * <element name="row" type="tns:Row" maxOccurs="unbounded"
- * minOccurs="1"></element>
- * </sequence>
- * </complexType>
- *
- * <complexType name="Row">
- * <sequence>
- * <element name="key" type="base64Binary"></element>
- * <element name="cell" type="tns:Cell"
- * maxOccurs="unbounded" minOccurs="1"></element>
- * </sequence>
- * </complexType>
- *
- * <complexType name="Cell">
- * <sequence>
- * <element name="value" maxOccurs="1" minOccurs="1">
- * <simpleType>
- * <restriction base="base64Binary"/>
- * </simpleType>
- * </element>
- * </sequence>
- * <attribute name="column" type="base64Binary" />
- * <attribute name="timestamp" type="int" />
- * </complexType>
- *
- */
-@XmlRootElement(name="CellSet")
-public class CellSetModel implements Serializable, ProtobufMessageHandler {
-
- private static final long serialVersionUID = 1L;
-
- private List<RowModel> rows;
-
- /**
- * Constructor
- */
- public CellSetModel() {
- this.rows = new ArrayList<RowModel>();
- }
-
- /**
- * @param rows the rows
- */
- public CellSetModel(List<RowModel> rows) {
- super();
- this.rows = rows;
- }
-
- /**
- * Add a row to this cell set
- * @param row the row
- */
- public void addRow(RowModel row) {
- rows.add(row);
- }
-
- /**
- * @return the rows
- */
- @XmlElement(name="Row")
- public List<RowModel> getRows() {
- return rows;
- }
-
- @Override
- public byte[] createProtobufOutput() {
- CellSet.Builder builder = CellSet.newBuilder();
- for (RowModel row: getRows()) {
- CellSet.Row.Builder rowBuilder = CellSet.Row.newBuilder();
- rowBuilder.setKey(ByteString.copyFrom(row.getKey()));
- for (CellModel cell: row.getCells()) {
- Cell.Builder cellBuilder = Cell.newBuilder();
- cellBuilder.setColumn(ByteString.copyFrom(cell.getColumn()));
- cellBuilder.setData(ByteString.copyFrom(cell.getValue()));
- if (cell.hasUserTimestamp()) {
- cellBuilder.setTimestamp(cell.getTimestamp());
- }
- rowBuilder.addValues(cellBuilder);
- }
- builder.addRows(rowBuilder);
- }
- return builder.build().toByteArray();
- }
-
- @Override
- public ProtobufMessageHandler getObjectFromMessage(byte[] message)
- throws IOException {
- CellSet.Builder builder = CellSet.newBuilder();
- builder.mergeFrom(message);
- for (CellSet.Row row: builder.getRowsList()) {
- RowModel rowModel = new RowModel(row.getKey().toByteArray());
- for (Cell cell: row.getValuesList()) {
- long timestamp = HConstants.LATEST_TIMESTAMP;
- if (cell.hasTimestamp()) {
- timestamp = cell.getTimestamp();
- }
- rowModel.addCell(
- new CellModel(cell.getColumn().toByteArray(), timestamp,
- cell.getData().toByteArray()));
- }
- addRow(rowModel);
- }
- return this;
- }
-}
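A hedged sketch of assembling a CellSetModel from RowModel and CellModel instances (row key and column illustrative), matching the nesting createProtobufOutput() above expects.

import org.apache.hadoop.hbase.stargate.model.CellModel;
import org.apache.hadoop.hbase.stargate.model.CellSetModel;
import org.apache.hadoop.hbase.stargate.model.RowModel;
import org.apache.hadoop.hbase.util.Bytes;

public class CellSetModelSketch {
  public static void main(String[] args) {
    RowModel row = new RowModel(Bytes.toBytes("row1"));
    row.addCell(new CellModel(Bytes.toBytes("info:name"), Bytes.toBytes("value1")));

    CellSetModel cellSet = new CellSetModel();
    cellSet.addRow(row);
    byte[] wire = cellSet.createProtobufOutput();  // one protobuf Row per RowModel
    System.out.println(wire.length + " bytes");
  }
}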
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/ColumnSchemaModel.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/ColumnSchemaModel.java
deleted file mode 100644
index 00ec54d..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/ColumnSchemaModel.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate.model;
-
-import java.io.Serializable;
-import java.util.HashMap;
-import java.util.Map;
-
-import javax.xml.bind.annotation.XmlAnyAttribute;
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlRootElement;
-import javax.xml.bind.annotation.XmlType;
-import javax.xml.namespace.QName;
-
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-
-/**
- * Representation of a column family schema.
- *
- *
- * <complexType name="ColumnSchema">
- * <attribute name="name" type="string"></attribute>
- * <anyAttribute></anyAttribute>
- * </complexType>
- *
- */
-@XmlRootElement(name="ColumnSchema")
-@XmlType(propOrder = {"name"})
-public class ColumnSchemaModel implements Serializable {
- private static final long serialVersionUID = 1L;
- private static QName BLOCKCACHE = new QName(HColumnDescriptor.BLOCKCACHE);
- private static QName BLOCKSIZE = new QName(HColumnDescriptor.BLOCKSIZE);
- private static QName BLOOMFILTER = new QName(HColumnDescriptor.BLOOMFILTER);
- private static QName COMPRESSION = new QName(HColumnDescriptor.COMPRESSION);
- private static QName IN_MEMORY = new QName(HConstants.IN_MEMORY);
- private static QName TTL = new QName(HColumnDescriptor.TTL);
- private static QName VERSIONS = new QName(HConstants.VERSIONS);
-
- private String name;
- private Map<QName,Object> attrs = new HashMap<QName,Object>();
-
- /**
- * Default constructor
- */
- public ColumnSchemaModel() {}
-
- /**
- * Add an attribute to the column family schema
- * @param name the attribute name
- * @param value the attribute value
- */
- public void addAttribute(String name, Object value) {
- attrs.put(new QName(name), value);
- }
-
- /**
- * @param name the attribute name
- * @return the attribute value
- */
- public String getAttribute(String name) {
- Object o = attrs.get(new QName(name));
- return o != null ? o.toString(): null;
- }
-
- /**
- * @return the column name
- */
- @XmlAttribute
- public String getName() {
- return name;
- }
-
- /**
- * @return the map for holding unspecified (user) attributes
- */
- @XmlAnyAttribute
- public Map<QName,Object> getAny() {
- return attrs;
- }
-
- /**
- * @param name the column name
- */
- public void setName(String name) {
- this.name = name;
- }
-
- /* (non-Javadoc)
- * @see java.lang.Object#toString()
- */
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder();
- sb.append("{ NAME => '");
- sb.append(name);
- sb.append('\'');
- for (Map.Entry<QName,Object> e: attrs.entrySet()) {
- sb.append(", ");
- sb.append(e.getKey().getLocalPart());
- sb.append(" => '");
- sb.append(e.getValue().toString());
- sb.append('\'');
- }
- sb.append(" }");
- return sb.toString();
- }
-
- // getters and setters for common schema attributes
-
- // cannot be standard bean type getters and setters, otherwise this would
- // confuse JAXB
-
- /**
- * @return true if the BLOCKCACHE attribute is present and true
- */
- public boolean __getBlockcache() {
- Object o = attrs.get(BLOCKCACHE);
- return o != null ?
- Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE;
- }
-
- /**
- * @return the value of the BLOCKSIZE attribute or its default if it is unset
- */
- public int __getBlocksize() {
- Object o = attrs.get(BLOCKSIZE);
- return o != null ?
- Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE;
- }
-
- /**
- * @return true if the BLOOMFILTER attribute is present and true
- */
- public boolean __getBloomfilter() {
- Object o = attrs.get(BLOOMFILTER);
- return o != null ?
- Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOOMFILTER;
- }
-
- /**
- * @return the value of the COMPRESSION attribute or its default if it is unset
- */
- public String __getCompression() {
- Object o = attrs.get(COMPRESSION);
- return o != null ? o.toString() : HColumnDescriptor.DEFAULT_COMPRESSION;
- }
-
- /**
- * @return true if the IN_MEMORY attribute is present and true
- */
- public boolean __getInMemory() {
- Object o = attrs.get(IN_MEMORY);
- return o != null ?
- Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY;
- }
-
- /**
- * @return the value of the TTL attribute or its default if it is unset
- */
- public int __getTTL() {
- Object o = attrs.get(TTL);
- return o != null ?
- Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_TTL;
- }
-
- /**
- * @return the value of the VERSIONS attribute or its default if it is unset
- */
- public int __getVersions() {
- Object o = attrs.get(VERSIONS);
- return o != null ?
- Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS;
- }
-
- /**
- * @param value the desired value of the BLOCKSIZE attribute
- */
- public void __setBlocksize(int value) {
- attrs.put(BLOCKSIZE, Integer.toString(value));
- }
-
- /**
- * @param value the desired value of the BLOCKCACHE attribute
- */
- public void __setBlockcache(boolean value) {
- attrs.put(BLOCKCACHE, Boolean.toString(value));
- }
-
- public void __setBloomfilter(boolean value) {
- attrs.put(BLOOMFILTER, Boolean.toString(value));
- }
-
- /**
- * @param value the desired value of the COMPRESSION attribute
- */
- public void __setCompression(String value) {
- attrs.put(COMPRESSION, value);
- }
-
- /**
- * @param value the desired value of the IN_MEMORY attribute
- */
- public void __setInMemory(boolean value) {
- attrs.put(IN_MEMORY, Boolean.toString(value));
- }
-
- /**
- * @param value the desired value of the TTL attribute
- */
- public void __setTTL(int value) {
- attrs.put(TTL, Integer.toString(value));
- }
-
- /**
- * @param value the desired value of the VERSIONS attribute
- */
- public void __setVersions(int value) {
- attrs.put(VERSIONS, Integer.toString(value));
- }
-}
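A hedged sketch of populating a ColumnSchemaModel via the double-underscore setters above; the family name and attribute values are illustrative.

import org.apache.hadoop.hbase.stargate.model.ColumnSchemaModel;

public class ColumnSchemaSketch {
  public static void main(String[] args) {
    ColumnSchemaModel family = new ColumnSchemaModel();
    family.setName("info");
    family.__setVersions(3);         // stored under the VERSIONS attribute
    family.__setCompression("GZ");   // stored under the COMPRESSION attribute
    family.__setInMemory(false);
    family.addAttribute("CUSTOM", "value");  // unrecognized attributes go to the any-attribute map
    System.out.println(family);      // { NAME => 'info', VERSIONS => '3', ... }
  }
}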
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/RowModel.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/RowModel.java
deleted file mode 100644
index 7fd2aab..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/RowModel.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate.model;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
-
-/**
- * Representation of a row. A row is a related set of cells, grouped by common
- * row key. RowModels do not appear in results by themselves. They are always
- * encapsulated within CellSetModels.
- *
- *
- * <complexType name="Row">
- * <sequence>
- * <element name="key" type="base64Binary"></element>
- * <element name="cell" type="tns:Cell"
- * maxOccurs="unbounded" minOccurs="1"></element>
- * </sequence>
- * </complexType>
- *
- */
-@XmlRootElement(name="Row")
-public class RowModel implements ProtobufMessageHandler, Serializable {
- private static final long serialVersionUID = 1L;
-
- private byte[] key;
- private List<CellModel> cells = new ArrayList<CellModel>();
-
- /**
- * Default constructor
- */
- public RowModel() { }
-
- /**
- * Constructor
- * @param key the row key
- */
- public RowModel(final String key) {
- this(key.getBytes());
- }
-
- /**
- * Constructor
- * @param key the row key
- */
- public RowModel(final byte[] key) {
- this.key = key;
- cells = new ArrayList<CellModel>();
- }
-
- /**
- * Constructor
- * @param key the row key
- * @param cells the cells
- */
- public RowModel(final String key, final List<CellModel> cells) {
- this(key.getBytes(), cells);
- }
-
- /**
- * Constructor
- * @param key the row key
- * @param cells the cells
- */
- public RowModel(final byte[] key, final List<CellModel> cells) {
- this.key = key;
- this.cells = cells;
- }
-
- /**
- * Adds a cell to the list of cells for this row
- * @param cell the cell
- */
- public void addCell(CellModel cell) {
- cells.add(cell);
- }
-
- /**
- * @return the row key
- */
- @XmlAttribute
- public byte[] getKey() {
- return key;
- }
-
- /**
- * @param key the row key
- */
- public void setKey(byte[] key) {
- this.key = key;
- }
-
- /**
- * @return the cells
- */
- @XmlElement(name="Cell")
- public List<CellModel> getCells() {
- return cells;
- }
-
- @Override
- public byte[] createProtobufOutput() {
- // there is no standalone row protobuf message
- throw new UnsupportedOperationException(
- "no protobuf equivalent to RowModel");
- }
-
- @Override
- public ProtobufMessageHandler getObjectFromMessage(byte[] message)
- throws IOException {
- // there is no standalone row protobuf message
- throw new UnsupportedOperationException(
- "no protobuf equivalent to RowModel");
- }
-}
diff --git a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/ScannerModel.java b/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/ScannerModel.java
deleted file mode 100644
index b9501db..0000000
--- a/contrib/stargate/src/main/java/org/apache/hadoop/hbase/stargate/model/ScannerModel.java
+++ /dev/null
@@ -1,608 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.stargate.model;
-
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.List;
-
-import javax.xml.bind.annotation.XmlAttribute;
-import javax.xml.bind.annotation.XmlElement;
-import javax.xml.bind.annotation.XmlRootElement;
-
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
-import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
-import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.PrefixFilter;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.hbase.filter.RegexStringComparator;
-import org.apache.hadoop.hbase.filter.RowFilter;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-import org.apache.hadoop.hbase.filter.SkipFilter;
-import org.apache.hadoop.hbase.filter.SubstringComparator;
-import org.apache.hadoop.hbase.filter.ValueFilter;
-import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.stargate.ProtobufMessageHandler;
-import org.apache.hadoop.hbase.stargate.protobuf.generated.ScannerMessage.Scanner;
-import org.apache.hadoop.hbase.util.Base64;
-import org.apache.hadoop.hbase.util.Bytes;
-
-import org.json.JSONArray;
-import org.json.JSONObject;
-import org.json.JSONStringer;
-
-import com.google.protobuf.ByteString;
-
-/**
- * A representation of Scanner parameters.
- *
- *