Index: hbase-protocol/src/main/protobuf/CoprocessorTaskMonitor.proto
===================================================================
--- hbase-protocol/src/main/protobuf/CoprocessorTaskMonitor.proto (revision 0)
+++ hbase-protocol/src/main/protobuf/CoprocessorTaskMonitor.proto (revision 23075)
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains protocol buffers that are used for coprocessor task monitor service.
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "CoprocessorTaskMonitorProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "Client.proto";
+import "HBase.proto";
+
+// Request for coprocessor task states, filtered by the given categories.
+message CoprocessorTaskMonitorRequest {
+  repeated CoprocessorTaskFilterCategory categories = 1;
+}
+
+// Response carrying one entry per matching coprocessor task.
+message CoprocessorTaskMonitorResponse {
+  repeated CoprocessorTaskMonitorResponseMember members = 1;
+}
+
+// State of a single coprocessor task, described by informational strings.
+message CoprocessorTaskMonitorResponseMember {
+  optional string table_name = 1;
+  optional string user_name = 2;
+  optional string coprocessor_service_name = 3;
+  optional string state = 4;
+  optional string region_name = 5;
+  optional string address = 6;
+}
+
+// A filter term pairing a task attribute type with the value to match.
+message CoprocessorTaskFilterCategory {
+  required CategoryType type = 1;
+  required bytes value = 2;
+
+  enum CategoryType {
+    TABLE_NAME = 1;
+    USER_NAME = 2;
+    COPROCESSOR_SERVICE_NAME = 3;
+    STATE = 4;
+    REGION_NAME = 5;
+    ADDRESS = 6;
+  }
+}
+
+// Service exposing coprocessor task state lookup.
+service CoprocessorTaskMonitorService {
+  rpc GetCoprocessorTaskState(CoprocessorTaskMonitorRequest)
+    returns(CoprocessorTaskMonitorResponse);
+}
Index: hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/CoprocessorTaskMonitorProtos.java
===================================================================
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/CoprocessorTaskMonitorProtos.java (revision 0)
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/CoprocessorTaskMonitorProtos.java (revision 23075)
@@ -0,0 +1,3769 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: CoprocessorTaskMonitor.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class CoprocessorTaskMonitorProtos {
+ private CoprocessorTaskMonitorProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface CoprocessorTaskMonitorRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // repeated .CoprocessorTaskFilterCategory categories = 1;
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ java.util.List
+ getCategoriesList();
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory getCategories(int index);
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ int getCategoriesCount();
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ java.util.List extends org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategoryOrBuilder>
+ getCategoriesOrBuilderList();
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategoryOrBuilder getCategoriesOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code CoprocessorTaskMonitorRequest}
+ */
+ public static final class CoprocessorTaskMonitorRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements CoprocessorTaskMonitorRequestOrBuilder {
+ // Use CoprocessorTaskMonitorRequest.newBuilder() to construct.
+ private CoprocessorTaskMonitorRequest(com.google.protobuf.GeneratedMessage.Builder> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private CoprocessorTaskMonitorRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final CoprocessorTaskMonitorRequest defaultInstance;
+ public static CoprocessorTaskMonitorRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CoprocessorTaskMonitorRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private CoprocessorTaskMonitorRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ categories_ = new java.util.ArrayList();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ categories_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ categories_ = java.util.Collections.unmodifiableList(categories_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest.class, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public CoprocessorTaskMonitorRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new CoprocessorTaskMonitorRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ // repeated .CoprocessorTaskFilterCategory categories = 1;
+ public static final int CATEGORIES_FIELD_NUMBER = 1;
+ private java.util.List categories_;
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public java.util.List getCategoriesList() {
+ return categories_;
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public java.util.List extends org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategoryOrBuilder>
+ getCategoriesOrBuilderList() {
+ return categories_;
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public int getCategoriesCount() {
+ return categories_.size();
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory getCategories(int index) {
+ return categories_.get(index);
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategoryOrBuilder getCategoriesOrBuilder(
+ int index) {
+ return categories_.get(index);
+ }
+
+ private void initFields() {
+ categories_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ for (int i = 0; i < getCategoriesCount(); i++) {
+ if (!getCategories(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (int i = 0; i < categories_.size(); i++) {
+ output.writeMessage(1, categories_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (int i = 0; i < categories_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, categories_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest other = (org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest) obj;
+
+ boolean result = true;
+ result = result && getCategoriesList()
+ .equals(other.getCategoriesList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (getCategoriesCount() > 0) {
+ hash = (37 * hash) + CATEGORIES_FIELD_NUMBER;
+ hash = (53 * hash) + getCategoriesList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code CoprocessorTaskMonitorRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest.class, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getCategoriesFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (categoriesBuilder_ == null) {
+ categories_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ categoriesBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest result = new org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest(this);
+ int from_bitField0_ = bitField0_;
+ if (categoriesBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ categories_ = java.util.Collections.unmodifiableList(categories_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.categories_ = categories_;
+ } else {
+ result.categories_ = categoriesBuilder_.build();
+ }
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest.getDefaultInstance()) return this;
+ if (categoriesBuilder_ == null) {
+ if (!other.categories_.isEmpty()) {
+ if (categories_.isEmpty()) {
+ categories_ = other.categories_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureCategoriesIsMutable();
+ categories_.addAll(other.categories_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.categories_.isEmpty()) {
+ if (categoriesBuilder_.isEmpty()) {
+ categoriesBuilder_.dispose();
+ categoriesBuilder_ = null;
+ categories_ = other.categories_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ categoriesBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getCategoriesFieldBuilder() : null;
+ } else {
+ categoriesBuilder_.addAllMessages(other.categories_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ for (int i = 0; i < getCategoriesCount(); i++) {
+ if (!getCategories(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // repeated .CoprocessorTaskFilterCategory categories = 1;
+ private java.util.List categories_ =
+ java.util.Collections.emptyList();
+ private void ensureCategoriesIsMutable() {
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+ categories_ = new java.util.ArrayList(categories_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.Builder, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategoryOrBuilder> categoriesBuilder_;
+
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public java.util.List getCategoriesList() {
+ if (categoriesBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(categories_);
+ } else {
+ return categoriesBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public int getCategoriesCount() {
+ if (categoriesBuilder_ == null) {
+ return categories_.size();
+ } else {
+ return categoriesBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory getCategories(int index) {
+ if (categoriesBuilder_ == null) {
+ return categories_.get(index);
+ } else {
+ return categoriesBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public Builder setCategories(
+ int index, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory value) {
+ if (categoriesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureCategoriesIsMutable();
+ categories_.set(index, value);
+ onChanged();
+ } else {
+ categoriesBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public Builder setCategories(
+ int index, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.Builder builderForValue) {
+ if (categoriesBuilder_ == null) {
+ ensureCategoriesIsMutable();
+ categories_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ categoriesBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public Builder addCategories(org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory value) {
+ if (categoriesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureCategoriesIsMutable();
+ categories_.add(value);
+ onChanged();
+ } else {
+ categoriesBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public Builder addCategories(
+ int index, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory value) {
+ if (categoriesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureCategoriesIsMutable();
+ categories_.add(index, value);
+ onChanged();
+ } else {
+ categoriesBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public Builder addCategories(
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.Builder builderForValue) {
+ if (categoriesBuilder_ == null) {
+ ensureCategoriesIsMutable();
+ categories_.add(builderForValue.build());
+ onChanged();
+ } else {
+ categoriesBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public Builder addCategories(
+ int index, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.Builder builderForValue) {
+ if (categoriesBuilder_ == null) {
+ ensureCategoriesIsMutable();
+ categories_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ categoriesBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public Builder addAllCategories(
+ java.lang.Iterable extends org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory> values) {
+ if (categoriesBuilder_ == null) {
+ ensureCategoriesIsMutable();
+ super.addAll(values, categories_);
+ onChanged();
+ } else {
+ categoriesBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public Builder clearCategories() {
+ if (categoriesBuilder_ == null) {
+ categories_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ categoriesBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public Builder removeCategories(int index) {
+ if (categoriesBuilder_ == null) {
+ ensureCategoriesIsMutable();
+ categories_.remove(index);
+ onChanged();
+ } else {
+ categoriesBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.Builder getCategoriesBuilder(
+ int index) {
+ return getCategoriesFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategoryOrBuilder getCategoriesOrBuilder(
+ int index) {
+ if (categoriesBuilder_ == null) {
+ return categories_.get(index); } else {
+ return categoriesBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public java.util.List extends org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategoryOrBuilder>
+ getCategoriesOrBuilderList() {
+ if (categoriesBuilder_ != null) {
+ return categoriesBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(categories_);
+ }
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.Builder addCategoriesBuilder() {
+ return getCategoriesFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.getDefaultInstance());
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.Builder addCategoriesBuilder(
+ int index) {
+ return getCategoriesFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.getDefaultInstance());
+ }
+ /**
+ * repeated .CoprocessorTaskFilterCategory categories = 1;
+ */
+ public java.util.List
+ getCategoriesBuilderList() {
+ return getCategoriesFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.Builder, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategoryOrBuilder>
+ getCategoriesFieldBuilder() {
+ if (categoriesBuilder_ == null) {
+ categoriesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.Builder, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategoryOrBuilder>(
+ categories_,
+ ((bitField0_ & 0x00000001) == 0x00000001),
+ getParentForChildren(),
+ isClean());
+ categories_ = null;
+ }
+ return categoriesBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:CoprocessorTaskMonitorRequest)
+ }
+
+ static {
+ defaultInstance = new CoprocessorTaskMonitorRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:CoprocessorTaskMonitorRequest)
+ }
+
+ public interface CoprocessorTaskMonitorResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ java.util.List
+ getMembersList();
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember getMembers(int index);
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ int getMembersCount();
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ java.util.List extends org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMemberOrBuilder>
+ getMembersOrBuilderList();
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMemberOrBuilder getMembersOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code CoprocessorTaskMonitorResponse}
+ */
+ public static final class CoprocessorTaskMonitorResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements CoprocessorTaskMonitorResponseOrBuilder {
+ // Use CoprocessorTaskMonitorResponse.newBuilder() to construct.
+ private CoprocessorTaskMonitorResponse(com.google.protobuf.GeneratedMessage.Builder> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private CoprocessorTaskMonitorResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final CoprocessorTaskMonitorResponse defaultInstance;
+ public static CoprocessorTaskMonitorResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CoprocessorTaskMonitorResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private CoprocessorTaskMonitorResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ members_ = new java.util.ArrayList();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ members_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ members_ = java.util.Collections.unmodifiableList(members_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse.class, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public CoprocessorTaskMonitorResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new CoprocessorTaskMonitorResponse(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ // repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ public static final int MEMBERS_FIELD_NUMBER = 1;
+ private java.util.List members_;
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public java.util.List getMembersList() {
+ return members_;
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public java.util.List extends org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMemberOrBuilder>
+ getMembersOrBuilderList() {
+ return members_;
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public int getMembersCount() {
+ return members_.size();
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember getMembers(int index) {
+ return members_.get(index);
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMemberOrBuilder getMembersOrBuilder(
+ int index) {
+ return members_.get(index);
+ }
+
+ private void initFields() {
+ members_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (int i = 0; i < members_.size(); i++) {
+ output.writeMessage(1, members_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (int i = 0; i < members_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, members_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse other = (org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse) obj;
+
+ boolean result = true;
+ result = result && getMembersList()
+ .equals(other.getMembersList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (getMembersCount() > 0) {
+ hash = (37 * hash) + MEMBERS_FIELD_NUMBER;
+ hash = (53 * hash) + getMembersList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code CoprocessorTaskMonitorResponse}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse.class, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getMembersFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (membersBuilder_ == null) {
+ members_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ membersBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse result = new org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse(this);
+ int from_bitField0_ = bitField0_;
+ if (membersBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ members_ = java.util.Collections.unmodifiableList(members_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.members_ = members_;
+ } else {
+ result.members_ = membersBuilder_.build();
+ }
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse.getDefaultInstance()) return this;
+ if (membersBuilder_ == null) {
+ if (!other.members_.isEmpty()) {
+ if (members_.isEmpty()) {
+ members_ = other.members_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureMembersIsMutable();
+ members_.addAll(other.members_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.members_.isEmpty()) {
+ if (membersBuilder_.isEmpty()) {
+ membersBuilder_.dispose();
+ membersBuilder_ = null;
+ members_ = other.members_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ membersBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getMembersFieldBuilder() : null;
+ } else {
+ membersBuilder_.addAllMessages(other.members_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ private java.util.List members_ =
+ java.util.Collections.emptyList();
+ private void ensureMembersIsMutable() {
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+ members_ = new java.util.ArrayList(members_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.Builder, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMemberOrBuilder> membersBuilder_;
+
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public java.util.List getMembersList() {
+ if (membersBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(members_);
+ } else {
+ return membersBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public int getMembersCount() {
+ if (membersBuilder_ == null) {
+ return members_.size();
+ } else {
+ return membersBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember getMembers(int index) {
+ if (membersBuilder_ == null) {
+ return members_.get(index);
+ } else {
+ return membersBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public Builder setMembers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember value) {
+ if (membersBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureMembersIsMutable();
+ members_.set(index, value);
+ onChanged();
+ } else {
+ membersBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public Builder setMembers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.Builder builderForValue) {
+ if (membersBuilder_ == null) {
+ ensureMembersIsMutable();
+ members_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ membersBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public Builder addMembers(org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember value) {
+ if (membersBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureMembersIsMutable();
+ members_.add(value);
+ onChanged();
+ } else {
+ membersBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public Builder addMembers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember value) {
+ if (membersBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureMembersIsMutable();
+ members_.add(index, value);
+ onChanged();
+ } else {
+ membersBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public Builder addMembers(
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.Builder builderForValue) {
+ if (membersBuilder_ == null) {
+ ensureMembersIsMutable();
+ members_.add(builderForValue.build());
+ onChanged();
+ } else {
+ membersBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public Builder addMembers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.Builder builderForValue) {
+ if (membersBuilder_ == null) {
+ ensureMembersIsMutable();
+ members_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ membersBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public Builder addAllMembers(
+ java.lang.Iterable extends org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember> values) {
+ if (membersBuilder_ == null) {
+ ensureMembersIsMutable();
+ super.addAll(values, members_);
+ onChanged();
+ } else {
+ membersBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public Builder clearMembers() {
+ if (membersBuilder_ == null) {
+ members_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ membersBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public Builder removeMembers(int index) {
+ if (membersBuilder_ == null) {
+ ensureMembersIsMutable();
+ members_.remove(index);
+ onChanged();
+ } else {
+ membersBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.Builder getMembersBuilder(
+ int index) {
+ return getMembersFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMemberOrBuilder getMembersOrBuilder(
+ int index) {
+ if (membersBuilder_ == null) {
+ return members_.get(index); } else {
+ return membersBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public java.util.List extends org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMemberOrBuilder>
+ getMembersOrBuilderList() {
+ if (membersBuilder_ != null) {
+ return membersBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(members_);
+ }
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.Builder addMembersBuilder() {
+ return getMembersFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.getDefaultInstance());
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.Builder addMembersBuilder(
+ int index) {
+ return getMembersFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.getDefaultInstance());
+ }
+ /**
+ * repeated .CoprocessorTaskMonitorResponseMember members = 1;
+ */
+ public java.util.List
+ getMembersBuilderList() {
+ return getMembersFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.Builder, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMemberOrBuilder>
+ getMembersFieldBuilder() {
+ if (membersBuilder_ == null) {
+ membersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.Builder, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMemberOrBuilder>(
+ members_,
+ ((bitField0_ & 0x00000001) == 0x00000001),
+ getParentForChildren(),
+ isClean());
+ members_ = null;
+ }
+ return membersBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:CoprocessorTaskMonitorResponse)
+ }
+
+ static {
+ defaultInstance = new CoprocessorTaskMonitorResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:CoprocessorTaskMonitorResponse)
+ }
+
  public interface CoprocessorTaskMonitorResponseMemberOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // optional string table_name = 1;
    /**
     * <code>optional string table_name = 1;</code>
     */
    boolean hasTableName();
    /**
     * <code>optional string table_name = 1;</code>
     */
    java.lang.String getTableName();
    /**
     * <code>optional string table_name = 1;</code>
     */
    com.google.protobuf.ByteString
        getTableNameBytes();

    // optional string user_name = 2;
    /**
     * <code>optional string user_name = 2;</code>
     */
    boolean hasUserName();
    /**
     * <code>optional string user_name = 2;</code>
     */
    java.lang.String getUserName();
    /**
     * <code>optional string user_name = 2;</code>
     */
    com.google.protobuf.ByteString
        getUserNameBytes();

    // optional string coprocessor_service_name = 3;
    /**
     * <code>optional string coprocessor_service_name = 3;</code>
     */
    boolean hasCoprocessorServiceName();
    /**
     * <code>optional string coprocessor_service_name = 3;</code>
     */
    java.lang.String getCoprocessorServiceName();
    /**
     * <code>optional string coprocessor_service_name = 3;</code>
     */
    com.google.protobuf.ByteString
        getCoprocessorServiceNameBytes();

    // optional string state = 4;
    /**
     * <code>optional string state = 4;</code>
     */
    boolean hasState();
    /**
     * <code>optional string state = 4;</code>
     */
    java.lang.String getState();
    /**
     * <code>optional string state = 4;</code>
     */
    com.google.protobuf.ByteString
        getStateBytes();

    // optional string region_name = 5;
    /**
     * <code>optional string region_name = 5;</code>
     */
    boolean hasRegionName();
    /**
     * <code>optional string region_name = 5;</code>
     */
    java.lang.String getRegionName();
    /**
     * <code>optional string region_name = 5;</code>
     */
    com.google.protobuf.ByteString
        getRegionNameBytes();

    // optional string address = 6;
    /**
     * <code>optional string address = 6;</code>
     */
    boolean hasAddress();
    /**
     * <code>optional string address = 6;</code>
     */
    java.lang.String getAddress();
    /**
     * <code>optional string address = 6;</code>
     */
    com.google.protobuf.ByteString
        getAddressBytes();
  }
+ /**
+ * Protobuf type {@code CoprocessorTaskMonitorResponseMember}
+ */
+ public static final class CoprocessorTaskMonitorResponseMember extends
+ com.google.protobuf.GeneratedMessage
+ implements CoprocessorTaskMonitorResponseMemberOrBuilder {
+ // Use CoprocessorTaskMonitorResponseMember.newBuilder() to construct.
+ private CoprocessorTaskMonitorResponseMember(com.google.protobuf.GeneratedMessage.Builder> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private CoprocessorTaskMonitorResponseMember(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final CoprocessorTaskMonitorResponseMember defaultInstance;
+ public static CoprocessorTaskMonitorResponseMember getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CoprocessorTaskMonitorResponseMember getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private CoprocessorTaskMonitorResponseMember(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ tableName_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ userName_ = input.readBytes();
+ break;
+ }
+ case 26: {
+ bitField0_ |= 0x00000004;
+ coprocessorServiceName_ = input.readBytes();
+ break;
+ }
+ case 34: {
+ bitField0_ |= 0x00000008;
+ state_ = input.readBytes();
+ break;
+ }
+ case 42: {
+ bitField0_ |= 0x00000010;
+ regionName_ = input.readBytes();
+ break;
+ }
+ case 50: {
+ bitField0_ |= 0x00000020;
+ address_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorResponseMember_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorResponseMember_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.class, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public CoprocessorTaskMonitorResponseMember parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new CoprocessorTaskMonitorResponseMember(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // optional string table_name = 1;
+ public static final int TABLE_NAME_FIELD_NUMBER = 1;
+ private java.lang.Object tableName_;
+ /**
+ * optional string table_name = 1;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string table_name = 1;
+ */
+ public java.lang.String getTableName() {
+ java.lang.Object ref = tableName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ tableName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string table_name = 1;
+ */
+ public com.google.protobuf.ByteString
+ getTableNameBytes() {
+ java.lang.Object ref = tableName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ tableName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string user_name = 2;
+ public static final int USER_NAME_FIELD_NUMBER = 2;
+ private java.lang.Object userName_;
+ /**
+ * optional string user_name = 2;
+ */
+ public boolean hasUserName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional string user_name = 2;
+ */
+ public java.lang.String getUserName() {
+ java.lang.Object ref = userName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ userName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string user_name = 2;
+ */
+ public com.google.protobuf.ByteString
+ getUserNameBytes() {
+ java.lang.Object ref = userName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ userName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string coprocessor_service_name = 3;
+ public static final int COPROCESSOR_SERVICE_NAME_FIELD_NUMBER = 3;
+ private java.lang.Object coprocessorServiceName_;
+ /**
+ * optional string coprocessor_service_name = 3;
+ */
+ public boolean hasCoprocessorServiceName() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional string coprocessor_service_name = 3;
+ */
+ public java.lang.String getCoprocessorServiceName() {
+ java.lang.Object ref = coprocessorServiceName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ coprocessorServiceName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string coprocessor_service_name = 3;
+ */
+ public com.google.protobuf.ByteString
+ getCoprocessorServiceNameBytes() {
+ java.lang.Object ref = coprocessorServiceName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ coprocessorServiceName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string state = 4;
+ public static final int STATE_FIELD_NUMBER = 4;
+ private java.lang.Object state_;
+ /**
+ * optional string state = 4;
+ */
+ public boolean hasState() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional string state = 4;
+ */
+ public java.lang.String getState() {
+ java.lang.Object ref = state_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ state_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string state = 4;
+ */
+ public com.google.protobuf.ByteString
+ getStateBytes() {
+ java.lang.Object ref = state_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ state_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string region_name = 5;
+ public static final int REGION_NAME_FIELD_NUMBER = 5;
+ private java.lang.Object regionName_;
+ /**
+ * optional string region_name = 5;
+ */
+ public boolean hasRegionName() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * optional string region_name = 5;
+ */
+ public java.lang.String getRegionName() {
+ java.lang.Object ref = regionName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ regionName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string region_name = 5;
+ */
+ public com.google.protobuf.ByteString
+ getRegionNameBytes() {
+ java.lang.Object ref = regionName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ regionName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string address = 6;
+ public static final int ADDRESS_FIELD_NUMBER = 6;
+ private java.lang.Object address_;
+ /**
+ * optional string address = 6;
+ */
+ public boolean hasAddress() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * optional string address = 6;
+ */
+ public java.lang.String getAddress() {
+ java.lang.Object ref = address_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ address_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string address = 6;
+ */
+ public com.google.protobuf.ByteString
+ getAddressBytes() {
+ java.lang.Object ref = address_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ address_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ tableName_ = "";
+ userName_ = "";
+ coprocessorServiceName_ = "";
+ state_ = "";
+ regionName_ = "";
+ address_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getTableNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getUserNameBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(3, getCoprocessorServiceNameBytes());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(4, getStateBytes());
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeBytes(5, getRegionNameBytes());
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeBytes(6, getAddressBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getTableNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getUserNameBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getCoprocessorServiceNameBytes());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, getStateBytes());
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(5, getRegionNameBytes());
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(6, getAddressBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember other = (org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember) obj;
+
+ boolean result = true;
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result && (hasUserName() == other.hasUserName());
+ if (hasUserName()) {
+ result = result && getUserName()
+ .equals(other.getUserName());
+ }
+ result = result && (hasCoprocessorServiceName() == other.hasCoprocessorServiceName());
+ if (hasCoprocessorServiceName()) {
+ result = result && getCoprocessorServiceName()
+ .equals(other.getCoprocessorServiceName());
+ }
+ result = result && (hasState() == other.hasState());
+ if (hasState()) {
+ result = result && getState()
+ .equals(other.getState());
+ }
+ result = result && (hasRegionName() == other.hasRegionName());
+ if (hasRegionName()) {
+ result = result && getRegionName()
+ .equals(other.getRegionName());
+ }
+ result = result && (hasAddress() == other.hasAddress());
+ if (hasAddress()) {
+ result = result && getAddress()
+ .equals(other.getAddress());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ if (hasUserName()) {
+ hash = (37 * hash) + USER_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getUserName().hashCode();
+ }
+ if (hasCoprocessorServiceName()) {
+ hash = (37 * hash) + COPROCESSOR_SERVICE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getCoprocessorServiceName().hashCode();
+ }
+ if (hasState()) {
+ hash = (37 * hash) + STATE_FIELD_NUMBER;
+ hash = (53 * hash) + getState().hashCode();
+ }
+ if (hasRegionName()) {
+ hash = (37 * hash) + REGION_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionName().hashCode();
+ }
+ if (hasAddress()) {
+ hash = (37 * hash) + ADDRESS_FIELD_NUMBER;
+ hash = (53 * hash) + getAddress().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code CoprocessorTaskMonitorResponseMember}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMemberOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorResponseMember_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorResponseMember_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.class, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ tableName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ userName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ coprocessorServiceName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000004);
+ state_ = "";
+ bitField0_ = (bitField0_ & ~0x00000008);
+ regionName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000010);
+ address_ = "";
+ bitField0_ = (bitField0_ & ~0x00000020);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskMonitorResponseMember_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember build() {
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember result = new org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.tableName_ = tableName_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.userName_ = userName_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.coprocessorServiceName_ = coprocessorServiceName_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.state_ = state_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.regionName_ = regionName_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.address_ = address_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember.getDefaultInstance()) return this;
+ if (other.hasTableName()) {
+ bitField0_ |= 0x00000001;
+ tableName_ = other.tableName_;
+ onChanged();
+ }
+ if (other.hasUserName()) {
+ bitField0_ |= 0x00000002;
+ userName_ = other.userName_;
+ onChanged();
+ }
+ if (other.hasCoprocessorServiceName()) {
+ bitField0_ |= 0x00000004;
+ coprocessorServiceName_ = other.coprocessorServiceName_;
+ onChanged();
+ }
+ if (other.hasState()) {
+ bitField0_ |= 0x00000008;
+ state_ = other.state_;
+ onChanged();
+ }
+ if (other.hasRegionName()) {
+ bitField0_ |= 0x00000010;
+ regionName_ = other.regionName_;
+ onChanged();
+ }
+ if (other.hasAddress()) {
+ bitField0_ |= 0x00000020;
+ address_ = other.address_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // optional string table_name = 1;
+ private java.lang.Object tableName_ = "";
+ /**
+ * optional string table_name = 1;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * optional string table_name = 1;
+ */
+ public java.lang.String getTableName() {
+ java.lang.Object ref = tableName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ tableName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string table_name = 1;
+ */
+ public com.google.protobuf.ByteString
+ getTableNameBytes() {
+ java.lang.Object ref = tableName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ tableName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string table_name = 1;
+ */
+ public Builder setTableName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ tableName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string table_name = 1;
+ */
+ public Builder clearTableName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ tableName_ = getDefaultInstance().getTableName();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string table_name = 1;
+ */
+ public Builder setTableNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ tableName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string user_name = 2;
+ private java.lang.Object userName_ = "";
+ /**
+ * optional string user_name = 2;
+ */
+ public boolean hasUserName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * optional string user_name = 2;
+ */
+ public java.lang.String getUserName() {
+ java.lang.Object ref = userName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ userName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string user_name = 2;
+ */
+ public com.google.protobuf.ByteString
+ getUserNameBytes() {
+ java.lang.Object ref = userName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ userName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string user_name = 2;
+ */
+ public Builder setUserName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ userName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string user_name = 2;
+ */
+ public Builder clearUserName() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ userName_ = getDefaultInstance().getUserName();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string user_name = 2;
+ */
+ public Builder setUserNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ userName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string coprocessor_service_name = 3;
+ private java.lang.Object coprocessorServiceName_ = "";
+ /**
+ * optional string coprocessor_service_name = 3;
+ */
+ public boolean hasCoprocessorServiceName() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional string coprocessor_service_name = 3;
+ */
+ public java.lang.String getCoprocessorServiceName() {
+ java.lang.Object ref = coprocessorServiceName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ coprocessorServiceName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string coprocessor_service_name = 3;
+ */
+ public com.google.protobuf.ByteString
+ getCoprocessorServiceNameBytes() {
+ java.lang.Object ref = coprocessorServiceName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ coprocessorServiceName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string coprocessor_service_name = 3;
+ */
+ public Builder setCoprocessorServiceName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ coprocessorServiceName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string coprocessor_service_name = 3;
+ */
+ public Builder clearCoprocessorServiceName() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ coprocessorServiceName_ = getDefaultInstance().getCoprocessorServiceName();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string coprocessor_service_name = 3;
+ */
+ public Builder setCoprocessorServiceNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ coprocessorServiceName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string state = 4;
+ private java.lang.Object state_ = "";
+ /**
+ * optional string state = 4;
+ */
+ public boolean hasState() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional string state = 4;
+ */
+ public java.lang.String getState() {
+ java.lang.Object ref = state_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ state_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string state = 4;
+ */
+ public com.google.protobuf.ByteString
+ getStateBytes() {
+ java.lang.Object ref = state_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ state_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string state = 4;
+ */
+ public Builder setState(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ state_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string state = 4;
+ */
+ public Builder clearState() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ state_ = getDefaultInstance().getState();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string state = 4;
+ */
+ public Builder setStateBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ state_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string region_name = 5;
+ private java.lang.Object regionName_ = "";
+ /**
+ * optional string region_name = 5;
+ */
+ public boolean hasRegionName() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * optional string region_name = 5;
+ */
+ public java.lang.String getRegionName() {
+ java.lang.Object ref = regionName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ regionName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string region_name = 5;
+ */
+ public com.google.protobuf.ByteString
+ getRegionNameBytes() {
+ java.lang.Object ref = regionName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ regionName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string region_name = 5;
+ */
+ public Builder setRegionName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ regionName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string region_name = 5;
+ */
+ public Builder clearRegionName() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ regionName_ = getDefaultInstance().getRegionName();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string region_name = 5;
+ */
+ public Builder setRegionNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ regionName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string address = 6;
+ private java.lang.Object address_ = "";
+ /**
+ * optional string address = 6;
+ */
+ public boolean hasAddress() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * optional string address = 6;
+ */
+ public java.lang.String getAddress() {
+ java.lang.Object ref = address_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ address_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string address = 6;
+ */
+ public com.google.protobuf.ByteString
+ getAddressBytes() {
+ java.lang.Object ref = address_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ address_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string address = 6;
+ */
+ public Builder setAddress(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000020;
+ address_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string address = 6;
+ */
+ public Builder clearAddress() {
+ bitField0_ = (bitField0_ & ~0x00000020);
+ address_ = getDefaultInstance().getAddress();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string address = 6;
+ */
+ public Builder setAddressBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000020;
+ address_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:CoprocessorTaskMonitorResponseMember)
+ }
+
+ static {
+ defaultInstance = new CoprocessorTaskMonitorResponseMember(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:CoprocessorTaskMonitorResponseMember)
+ }
+
+ public interface CoprocessorTaskFilterCategoryOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .CoprocessorTaskFilterCategory.CategoryType type = 1;
+ /**
+ * required .CoprocessorTaskFilterCategory.CategoryType type = 1;
+ */
+ boolean hasType();
+ /**
+ * required .CoprocessorTaskFilterCategory.CategoryType type = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType getType();
+
+ // required bytes value = 2;
+ /**
+ * required bytes value = 2;
+ */
+ boolean hasValue();
+ /**
+ * required bytes value = 2;
+ */
+ com.google.protobuf.ByteString getValue();
+ }
+ /**
+ * Protobuf type {@code CoprocessorTaskFilterCategory}
+ */
+ public static final class CoprocessorTaskFilterCategory extends
+ com.google.protobuf.GeneratedMessage
+ implements CoprocessorTaskFilterCategoryOrBuilder {
+ // Use CoprocessorTaskFilterCategory.newBuilder() to construct.
+ private CoprocessorTaskFilterCategory(com.google.protobuf.GeneratedMessage.Builder> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private CoprocessorTaskFilterCategory(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final CoprocessorTaskFilterCategory defaultInstance;
+ public static CoprocessorTaskFilterCategory getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CoprocessorTaskFilterCategory getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private CoprocessorTaskFilterCategory(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType value = org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(1, rawValue);
+ } else {
+ bitField0_ |= 0x00000001;
+ type_ = value;
+ }
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ value_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskFilterCategory_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskFilterCategory_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.class, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<CoprocessorTaskFilterCategory> PARSER =
+ new com.google.protobuf.AbstractParser<CoprocessorTaskFilterCategory>() {
+ public CoprocessorTaskFilterCategory parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new CoprocessorTaskFilterCategory(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<CoprocessorTaskFilterCategory> getParserForType() {
+ return PARSER;
+ }
+
+ /**
+ * Protobuf enum {@code CoprocessorTaskFilterCategory.CategoryType}
+ */
+ public enum CategoryType
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * TABLE_NAME = 1;
+ */
+ TABLE_NAME(0, 1),
+ /**
+ * USER_NAME = 2;
+ */
+ USER_NAME(1, 2),
+ /**
+ * COPROCESSOR_SERVICE_NAME = 3;
+ */
+ COPROCESSOR_SERVICE_NAME(2, 3),
+ /**
+ * STATE = 4;
+ */
+ STATE(3, 4),
+ /**
+ * REGION_NAME = 5;
+ */
+ REGION_NAME(4, 5),
+ /**
+ * ADDRESS = 6;
+ */
+ ADDRESS(5, 6),
+ ;
+
+ /**
+ * TABLE_NAME = 1;
+ */
+ public static final int TABLE_NAME_VALUE = 1;
+ /**
+ * USER_NAME = 2;
+ */
+ public static final int USER_NAME_VALUE = 2;
+ /**
+ * COPROCESSOR_SERVICE_NAME = 3;
+ */
+ public static final int COPROCESSOR_SERVICE_NAME_VALUE = 3;
+ /**
+ * STATE = 4;
+ */
+ public static final int STATE_VALUE = 4;
+ /**
+ * REGION_NAME = 5;
+ */
+ public static final int REGION_NAME_VALUE = 5;
+ /**
+ * ADDRESS = 6;
+ */
+ public static final int ADDRESS_VALUE = 6;
+
+
+ public final int getNumber() { return value; }
+
+ public static CategoryType valueOf(int value) {
+ switch (value) {
+ case 1: return TABLE_NAME;
+ case 2: return USER_NAME;
+ case 3: return COPROCESSOR_SERVICE_NAME;
+ case 4: return STATE;
+ case 5: return REGION_NAME;
+ case 6: return ADDRESS;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<CategoryType>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<CategoryType>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<CategoryType>() {
+ public CategoryType findValueByNumber(int number) {
+ return CategoryType.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final CategoryType[] VALUES = values();
+
+ public static CategoryType valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private CategoryType(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:CoprocessorTaskFilterCategory.CategoryType)
+ }
+
+ private int bitField0_;
+ // required .CoprocessorTaskFilterCategory.CategoryType type = 1;
+ public static final int TYPE_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType type_;
+ /**
+ * required .CoprocessorTaskFilterCategory.CategoryType type = 1;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .CoprocessorTaskFilterCategory.CategoryType type = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType getType() {
+ return type_;
+ }
+
+ // required bytes value = 2;
+ public static final int VALUE_FIELD_NUMBER = 2;
+ private com.google.protobuf.ByteString value_;
+ /**
+ * required bytes value = 2;
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required bytes value = 2;
+ */
+ public com.google.protobuf.ByteString getValue() {
+ return value_;
+ }
+
+ private void initFields() {
+ type_ = org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType.TABLE_NAME;
+ value_ = com.google.protobuf.ByteString.EMPTY;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasType()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasValue()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeEnum(1, type_.getNumber());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, value_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(1, type_.getNumber());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, value_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory other = (org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory) obj;
+
+ boolean result = true;
+ result = result && (hasType() == other.hasType());
+ if (hasType()) {
+ result = result &&
+ (getType() == other.getType());
+ }
+ result = result && (hasValue() == other.hasValue());
+ if (hasValue()) {
+ result = result && getValue()
+ .equals(other.getValue());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasType()) {
+ hash = (37 * hash) + TYPE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getType());
+ }
+ if (hasValue()) {
+ hash = (37 * hash) + VALUE_FIELD_NUMBER;
+ hash = (53 * hash) + getValue().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code CoprocessorTaskFilterCategory}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategoryOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskFilterCategory_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskFilterCategory_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.class, org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ type_ = org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType.TABLE_NAME;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ value_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.internal_static_CoprocessorTaskFilterCategory_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory build() {
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory result = new org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.type_ = type_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.value_ = value_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.getDefaultInstance()) return this;
+ if (other.hasType()) {
+ setType(other.getType());
+ }
+ if (other.hasValue()) {
+ setValue(other.getValue());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasType()) {
+
+ return false;
+ }
+ if (!hasValue()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .CoprocessorTaskFilterCategory.CategoryType type = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType type_ = org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType.TABLE_NAME;
+ /**
+ * required .CoprocessorTaskFilterCategory.CategoryType type = 1;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .CoprocessorTaskFilterCategory.CategoryType type = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType getType() {
+ return type_;
+ }
+ /**
+ * required .CoprocessorTaskFilterCategory.CategoryType type = 1;
+ */
+ public Builder setType(org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ type_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required .CoprocessorTaskFilterCategory.CategoryType type = 1;
+ */
+ public Builder clearType() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ type_ = org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType.TABLE_NAME;
+ onChanged();
+ return this;
+ }
+
+ // required bytes value = 2;
+ private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * required bytes value = 2;
+ */
+ public boolean hasValue() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required bytes value = 2;
+ */
+ public com.google.protobuf.ByteString getValue() {
+ return value_;
+ }
+ /**
+ * required bytes value = 2;
+ */
+ public Builder setValue(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ value_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required bytes value = 2;
+ */
+ public Builder clearValue() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ value_ = getDefaultInstance().getValue();
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:CoprocessorTaskFilterCategory)
+ }
+
+ static {
+ defaultInstance = new CoprocessorTaskFilterCategory(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:CoprocessorTaskFilterCategory)
+ }
+
+ /**
+ * Protobuf service {@code CoprocessorTaskMonitorService}
+ */
+ public static abstract class CoprocessorTaskMonitorService
+ implements com.google.protobuf.Service {
+ protected CoprocessorTaskMonitorService() {}
+
+ public interface Interface {
+ /**
+ * rpc GetCoprocessorTaskState(.CoprocessorTaskMonitorRequest) returns (.CoprocessorTaskMonitorResponse);
+ */
+ public abstract void getCoprocessorTaskState(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest request,
+ com.google.protobuf.RpcCallback done);
+
+ }
+
+ public static com.google.protobuf.Service newReflectiveService(
+ final Interface impl) {
+ return new CoprocessorTaskMonitorService() {
+ @java.lang.Override
+ public void getCoprocessorTaskState(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse> done) {
+ impl.getCoprocessorTaskState(controller, request, done);
+ }
+
+ };
+ }
+
+ public static com.google.protobuf.BlockingService
+ newReflectiveBlockingService(final BlockingInterface impl) {
+ return new com.google.protobuf.BlockingService() {
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final com.google.protobuf.Message callBlockingMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request)
+ throws com.google.protobuf.ServiceException {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callBlockingMethod() given method descriptor for " +
+ "wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return impl.getCoprocessorTaskState(controller, (org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest)request);
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ };
+ }
+
+ /**
+ * <code>rpc GetCoprocessorTaskState(.CoprocessorTaskMonitorRequest) returns (.CoprocessorTaskMonitorResponse);</code>
+ */
+ public abstract void getCoprocessorTaskState(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse> done);
+
+ public static final
+ com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.getDescriptor().getServices().get(0);
+ }
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final void callMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request,
+ com.google.protobuf.RpcCallback<
+ com.google.protobuf.Message> done) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callMethod() given method descriptor for wrong " +
+ "service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ this.getCoprocessorTaskState(controller, (org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public static Stub newStub(
+ com.google.protobuf.RpcChannel channel) {
+ return new Stub(channel);
+ }
+
+ public static final class Stub extends org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorService implements Interface {
+ private Stub(com.google.protobuf.RpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.RpcChannel channel;
+
+ public com.google.protobuf.RpcChannel getChannel() {
+ return channel;
+ }
+
+ public void getCoprocessorTaskState(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest request,
+ com.google.protobuf.RpcCallback done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse.getDefaultInstance()));
+ }
+ }
+
+ public static BlockingInterface newBlockingStub(
+ com.google.protobuf.BlockingRpcChannel channel) {
+ return new BlockingStub(channel);
+ }
+
+ public interface BlockingInterface {
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse getCoprocessorTaskState(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest request)
+ throws com.google.protobuf.ServiceException;
+ }
+
+ private static final class BlockingStub implements BlockingInterface {
+ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.BlockingRpcChannel channel;
+
+ public org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse getCoprocessorTaskState(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse.getDefaultInstance());
+ }
+
+ }
+
+ // @@protoc_insertion_point(class_scope:CoprocessorTaskMonitorService)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_CoprocessorTaskMonitorRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_CoprocessorTaskMonitorRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_CoprocessorTaskMonitorResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_CoprocessorTaskMonitorResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_CoprocessorTaskMonitorResponseMember_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_CoprocessorTaskMonitorResponseMember_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_CoprocessorTaskFilterCategory_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_CoprocessorTaskFilterCategory_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\034CoprocessorTaskMonitor.proto\032\014Client.p" +
+ "roto\032\013HBase.proto\"S\n\035CoprocessorTaskMoni" +
+ "torRequest\0222\n\ncategories\030\001 \003(\0132\036.Coproce" +
+ "ssorTaskFilterCategory\"X\n\036CoprocessorTas" +
+ "kMonitorResponse\0226\n\007members\030\001 \003(\0132%.Copr" +
+ "ocessorTaskMonitorResponseMember\"\244\001\n$Cop" +
+ "rocessorTaskMonitorResponseMember\022\022\n\ntab" +
+ "le_name\030\001 \001(\t\022\021\n\tuser_name\030\002 \001(\t\022 \n\030copr" +
+ "ocessor_service_name\030\003 \001(\t\022\r\n\005state\030\004 \001(" +
+ "\t\022\023\n\013region_name\030\005 \001(\t\022\017\n\007address\030\006 \001(\t\"",
+ "\337\001\n\035CoprocessorTaskFilterCategory\0229\n\004typ" +
+ "e\030\001 \002(\0162+.CoprocessorTaskFilterCategory." +
+ "CategoryType\022\r\n\005value\030\002 \002(\014\"t\n\014CategoryT" +
+ "ype\022\016\n\nTABLE_NAME\020\001\022\r\n\tUSER_NAME\020\002\022\034\n\030CO" +
+ "PROCESSOR_SERVICE_NAME\020\003\022\t\n\005STATE\020\004\022\017\n\013R" +
+ "EGION_NAME\020\005\022\013\n\007ADDRESS\020\0062{\n\035Coprocessor" +
+ "TaskMonitorService\022Z\n\027GetCoprocessorTask" +
+ "State\022\036.CoprocessorTaskMonitorRequest\032\037." +
+ "CoprocessorTaskMonitorResponseBR\n*org.ap" +
+ "ache.hadoop.hbase.protobuf.generatedB\034Co",
+ "processorTaskMonitorProtosH\001\210\001\001\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_CoprocessorTaskMonitorRequest_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_CoprocessorTaskMonitorRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_CoprocessorTaskMonitorRequest_descriptor,
+ new java.lang.String[] { "Categories", });
+ internal_static_CoprocessorTaskMonitorResponse_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_CoprocessorTaskMonitorResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_CoprocessorTaskMonitorResponse_descriptor,
+ new java.lang.String[] { "Members", });
+ internal_static_CoprocessorTaskMonitorResponseMember_descriptor =
+ getDescriptor().getMessageTypes().get(2);
+ internal_static_CoprocessorTaskMonitorResponseMember_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_CoprocessorTaskMonitorResponseMember_descriptor,
+ new java.lang.String[] { "TableName", "UserName", "CoprocessorServiceName", "State", "RegionName", "Address", });
+ internal_static_CoprocessorTaskFilterCategory_descriptor =
+ getDescriptor().getMessageTypes().get(3);
+ internal_static_CoprocessorTaskFilterCategory_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_CoprocessorTaskFilterCategory_descriptor,
+ new java.lang.String[] { "Type", "Value", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(),
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
Index: hbase-server/src/test/protobuf/CoprocessorTaskGenerator.proto
===================================================================
--- hbase-server/src/test/protobuf/CoprocessorTaskGenerator.proto (revision 0)
+++ hbase-server/src/test/protobuf/CoprocessorTaskGenerator.proto (revision 23075)
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Coprocessor task monitor
+// Test-only protocol: CoprocessorTaskGeneratorEndpoint (deployed on each
+// region by TestCoprocessorTaskMonitor) implements this service so the test
+// can create coprocessor tasks on demand and then observe them through the
+// coprocessor task monitor.
+option java_package = "org.apache.hadoop.hbase.coprocessor.protobuf.generated";
+option java_outer_classname = "CoprocessorTaskGeneratorProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+// Empty request: Generate() takes no arguments.
+message CoprocessorTaskGeneratorRequest {
+}
+
+// Carries a human-readable result string back to the calling test.
+message CoprocessorTaskGeneratorResponse {
+  required string result = 1;
+}
+
+// Single-method service; each Generate() call starts coprocessor tasks that
+// should afterwards be visible via CoprocessorTaskMonitorClient (see
+// TestCoprocessorTaskMonitor#testGetCoprocessorState).
+service CoprocessorTaskGeneratorService {
+  rpc Generate(CoprocessorTaskGeneratorRequest)
+      returns(CoprocessorTaskGeneratorResponse);
+}
\ No newline at end of file
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestTaskMonitor.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestTaskMonitor.java (revision 22835)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestTaskMonitor.java (revision 23075)
@@ -100,6 +100,65 @@
assertEquals("task 10", tm.getTasks().get(0).getDescription());
}
+  /**
+   * Sanity checks for the coprocessor-task side of {@link TaskMonitor}:
+   * a freshly created task is listed in WAITING state, completing it keeps
+   * it listed, and expiring it removes it from the list.
+   */
+  @Test
+  public void testCoprocessorTaskMonitorBasics() {
+    TaskMonitor tm = new TaskMonitor();
+    assertTrue("Task monitor should start empty",
+        tm.getCoprocessorTasks().isEmpty());
+
+    // Make a task and fetch it back out
+    MonitoredCoprocessorTask task = tm.createCoprocessorTaskStatus("test", null,
+        "org.apache.hadoop.hbase.coprocessor.LongTimeRunProtocol", "test",
+        "test_region");
+    MonitoredCoprocessorTask taskFromTm = tm.getCoprocessorTasks().get(0);
+
+    // Make sure the state is reasonable.
+    // NOTE(review): -1 is presumably the "not completed yet" sentinel of
+    // getCompletionTimestamp() -- confirm against MonitoredTask.
+    assertEquals(-1, taskFromTm.getCompletionTimestamp());
+    assertEquals(MonitoredTask.State.WAITING, taskFromTm.getState());
+
+    // Mark it as finished
+    task.markComplete("Finished!");
+    assertEquals(MonitoredTask.State.COMPLETE, task.getState());
+
+    // It should still show up in the TaskMonitor list
+    assertEquals(1, tm.getCoprocessorTasks().size());
+
+    // If we mark its completion time back a few minutes, it should get gced
+    // (the size-0 assertion implies the purge happens by the time
+    // getCoprocessorTasks() is next called).
+    task.expireNow();
+    assertEquals(0, tm.getCoprocessorTasks().size());
+  }
+  /**
+   * Verifies that a coprocessor task whose creating thread leaks it (drops
+   * every strong reference) is reported as ABORTED once the reference has
+   * been garbage collected.
+   */
+  @Test
+  public void testCoprocessorTasksGetAbortedOnLeak() throws InterruptedException {
+    final TaskMonitor tm = new TaskMonitor();
+    assertTrue("Task monitor should start empty",
+        tm.getCoprocessorTasks().isEmpty());
+
+    final AtomicBoolean threadSuccess = new AtomicBoolean(false);
+    // Make a task in some other thread and leak it
+    Thread t = new Thread() {
+      @Override
+      public void run() {
+        MonitoredCoprocessorTask task = tm.createCoprocessorTaskStatus("test", null,
+            "org.apache.hadoop.hbase.coprocessor.LongTimeRunProtocol", "test", "test_region");
+        assertEquals(MonitoredTask.State.WAITING, task.getState());
+        task.resume("mark as running");
+        threadSuccess.set(true);
+      }
+    };
+    t.start();
+    t.join();
+    // Make sure the thread saw the correct state
+    // (an assertion failure inside the spawned thread would not fail this
+    // test directly, so the flag is how we observe it).
+    assertTrue(threadSuccess.get());
+
+    // Make sure the leaked reference gets cleared
+    // NOTE(review): System.gc() is only a hint to the JVM; if no collection
+    // actually happens this test can fail spuriously.
+    System.gc();
+    System.gc();
+    System.gc();
+
+    // Now it should be aborted
+    MonitoredTask taskFromTm = tm.getCoprocessorTasks().get(0);
+    assertEquals(MonitoredTask.State.ABORTED, taskFromTm.getState());
+  }
}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestCoprocessorTaskMonitor.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestCoprocessorTaskMonitor.java (revision 0)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/monitoring/TestCoprocessorTaskMonitor.java (revision 23075)
@@ -0,0 +1,175 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.monitoring;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.CoprocessorTaskMonitorClient;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorTaskGeneratorEndpoint;
+import org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos;
+import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.ServiceException;
+
+/**
+ * End-to-end test for the coprocessor task monitor: starts a mini cluster
+ * with {@link CoprocessorTaskGeneratorEndpoint} deployed on every region,
+ * triggers coprocessor tasks via the endpoint, and checks that
+ * {@link CoprocessorTaskMonitorClient} can list and filter them.
+ */
+public class TestCoprocessorTaskMonitor {
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final byte[] FAMILY = Bytes.toBytes("cf");
+  // Tasks are generated against 'table'; monitor clients bound to the other
+  // two tables must still see them (see testGetCoprocessorState).
+  private static HTable table;
+  private static HTable table1;
+  private static HTable table2;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    // Deploy the task-generating endpoint on every region and enable the
+    // coprocessor task monitor server before the mini cluster starts.
+    Configuration conf = TEST_UTIL.getConfiguration();
+    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+        CoprocessorTaskGeneratorEndpoint.class.getName());
+    conf.setBoolean("hbase.coprocessor.task.monitor.server.enabled", true);
+    TEST_UTIL.startMiniZKCluster();
+    TEST_UTIL.startMiniCluster();
+    table = createTable(Bytes.toBytes("test"));
+    table1 = createTable(Bytes.toBytes("test1"));
+    table2 = createTable(Bytes.toBytes("test2"));
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /** The monitor service must answer even before any task was generated. */
+  @Test
+  public void testRunning() throws IOException {
+    CoprocessorTaskMonitorClient client = new CoprocessorTaskMonitorClient(
+        TEST_UTIL.getConfiguration(), table.getName());
+    CoprocessorTaskMonitorResponse response = client.getCoprocessorTaskState();
+    assertNotNull(response);
+  }
+
+  /**
+   * Generates coprocessor tasks on every region of 'test', then checks the
+   * member counts reported under various filter categories, and that
+   * TABLE_NAME/ADDRESS are rejected as request-side filters.
+   */
+  @Test
+  public void testGetCoprocessorState() throws ServiceException, Throwable {
+    // Invoke the generator endpoint on every region of 'test'.
+    table.coprocessorService(
+        CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorService.class,
+        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
+        new Batch.Call<CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorService, String>() {
+          @Override
+          public String call(
+              CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorService instance)
+              throws IOException {
+            // BlockingRpcCallback.get() blocks until the endpoint responds.
+            BlockingRpcCallback<CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse> rpcCallback =
+                new BlockingRpcCallback<CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse>();
+            instance.generate(null,
+                CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest.newBuilder().build(),
+                rpcCallback);
+            return rpcCallback.get().getResult();
+          }
+        });
+
+    // Unfiltered: each client sees all three generated tasks.
+    CoprocessorTaskMonitorClient client1 = new CoprocessorTaskMonitorClient(
+        TEST_UTIL.getConfiguration(), table1.getName());
+    assertEquals(3, client1.getCoprocessorTaskState().getMembersCount());
+
+    CoprocessorTaskMonitorClient client2 = new CoprocessorTaskMonitorClient(
+        TEST_UTIL.getConfiguration(), table2.getName());
+    assertEquals(3, client2.getCoprocessorTaskState().getMembersCount());
+
+    // Filter by task state.
+    assertEquals(2, client1.getCoprocessorTaskState(
+        newCategory(CategoryType.STATE, "WAITING")).getMembersCount());
+    assertEquals(2, client2.getCoprocessorTaskState(
+        newCategory(CategoryType.STATE, "RUNNING")).getMembersCount());
+
+    // Filter by coprocessor service name.
+    assertEquals(1, client1.getCoprocessorTaskState(
+        newCategory(CategoryType.COPROCESSOR_SERVICE_NAME, "service1")).getMembersCount());
+    assertEquals(1, client2.getCoprocessorTaskState(
+        newCategory(CategoryType.COPROCESSOR_SERVICE_NAME, "service2")).getMembersCount());
+
+    // Filter by region name.
+    assertEquals(2, client1.getCoprocessorTaskState(
+        newCategory(CategoryType.REGION_NAME, "region1")).getMembersCount());
+    assertEquals(3, client2.getCoprocessorTaskState(
+        newCategory(CategoryType.REGION_NAME, "region2")).getMembersCount());
+
+    // Multiple categories are combined (ANDed) by the server.
+    assertEquals(1, client2.getCoprocessorTaskState(
+        newCategory(CategoryType.STATE, "RUNNING"),
+        newCategory(CategoryType.COPROCESSOR_SERVICE_NAME, "service1")).getMembersCount());
+
+    // TABLE_NAME and ADDRESS must be rejected as request filters -- presumably
+    // because they are fixed per client; confirm in CoprocessorTaskMonitorClient.
+    assertTrue(rejectsCategory(client1, newCategory(CategoryType.TABLE_NAME, "dummy")));
+    assertTrue(rejectsCategory(client1, newCategory(CategoryType.ADDRESS, "dummyAddress")));
+  }
+
+  /** Builds a filter category carrying the UTF-8 bytes of {@code value}. */
+  private static CoprocessorTaskFilterCategory newCategory(CategoryType type, String value) {
+    return CoprocessorTaskFilterCategory.newBuilder()
+        .setType(type)
+        .setValue(ByteString.copyFrom(Bytes.toBytes(value)))
+        .build();
+  }
+
+  /**
+   * @return true iff the client rejects the given filter category with an
+   *         IllegalArgumentException.
+   */
+  private static boolean rejectsCategory(CoprocessorTaskMonitorClient client,
+      CoprocessorTaskFilterCategory category) throws IOException {
+    try {
+      client.getCoprocessorTaskState(category);
+      return false;
+    } catch (IllegalArgumentException expected) {
+      return true;
+    }
+  }
+
+  /** Creates a table with the single column family {@link #FAMILY}. */
+  @SuppressWarnings("deprecation")
+  private static HTable createTable(byte[] tableName) throws IOException {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    htd.addFamily(new HColumnDescriptor(FAMILY));
+    HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
+    try {
+      admin.createTable(htd);
+    } finally {
+      // Close the admin even when table creation fails.
+      admin.close();
+    }
+    return new HTable(TEST_UTIL.getConfiguration(), tableName);
+  }
+}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/CoprocessorTaskGeneratorProtos.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/CoprocessorTaskGeneratorProtos.java (revision 0)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/CoprocessorTaskGeneratorProtos.java (revision 23075)
@@ -0,0 +1,1154 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: CoprocessorTaskGenerator.proto
+
+package org.apache.hadoop.hbase.coprocessor.protobuf.generated;
+
+public final class CoprocessorTaskGeneratorProtos {
+ private CoprocessorTaskGeneratorProtos() {}
+  // Generated registration hook: this .proto defines no extensions, so the
+  // body is intentionally empty. Regenerate via protoc rather than editing.
+  public static void registerAllExtensions(
+      com.google.protobuf.ExtensionRegistry registry) {
+  }
+  // Accessor interface for CoprocessorTaskGeneratorRequest. The message has
+  // no fields, so nothing beyond MessageOrBuilder is generated.
+  public interface CoprocessorTaskGeneratorRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+  }
+ /**
+ * Protobuf type {@code CoprocessorTaskGeneratorRequest}
+ */
+ public static final class CoprocessorTaskGeneratorRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements CoprocessorTaskGeneratorRequestOrBuilder {
+ // Use CoprocessorTaskGeneratorRequest.newBuilder() to construct.
+ private CoprocessorTaskGeneratorRequest(com.google.protobuf.GeneratedMessage.Builder> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private CoprocessorTaskGeneratorRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final CoprocessorTaskGeneratorRequest defaultInstance;
+ public static CoprocessorTaskGeneratorRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CoprocessorTaskGeneratorRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private CoprocessorTaskGeneratorRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.internal_static_CoprocessorTaskGeneratorRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.internal_static_CoprocessorTaskGeneratorRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest.class, org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public CoprocessorTaskGeneratorRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new CoprocessorTaskGeneratorRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ private void initFields() {
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest other = (org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest) obj;
+
+ boolean result = true;
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code CoprocessorTaskGeneratorRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.internal_static_CoprocessorTaskGeneratorRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.internal_static_CoprocessorTaskGeneratorRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest.class, org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.internal_static_CoprocessorTaskGeneratorRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest build() {
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest buildPartial() {
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest result = new org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest other) {
+ if (other == org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:CoprocessorTaskGeneratorRequest)
+ }
+
+ static {
+ defaultInstance = new CoprocessorTaskGeneratorRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:CoprocessorTaskGeneratorRequest)
+ }
+
+  // Accessor interface for CoprocessorTaskGeneratorResponse: presence check
+  // plus String/ByteString views of the required 'result' field.
+  public interface CoprocessorTaskGeneratorResponseOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string result = 1;
+    /**
+     * required string result = 1;
+     */
+    boolean hasResult();
+    /**
+     * required string result = 1;
+     */
+    java.lang.String getResult();
+    /**
+     * required string result = 1;
+     */
+    com.google.protobuf.ByteString
+        getResultBytes();
+  }
+ /**
+ * Protobuf type {@code CoprocessorTaskGeneratorResponse}
+ */
+ public static final class CoprocessorTaskGeneratorResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements CoprocessorTaskGeneratorResponseOrBuilder {
+ // Use CoprocessorTaskGeneratorResponse.newBuilder() to construct.
+ private CoprocessorTaskGeneratorResponse(com.google.protobuf.GeneratedMessage.Builder> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private CoprocessorTaskGeneratorResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final CoprocessorTaskGeneratorResponse defaultInstance;
+ public static CoprocessorTaskGeneratorResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public CoprocessorTaskGeneratorResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private CoprocessorTaskGeneratorResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ result_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.internal_static_CoprocessorTaskGeneratorResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.internal_static_CoprocessorTaskGeneratorResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse.class, org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public CoprocessorTaskGeneratorResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new CoprocessorTaskGeneratorResponse(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string result = 1;
+ public static final int RESULT_FIELD_NUMBER = 1;
+ private java.lang.Object result_;
+ /**
+ * required string result = 1;
+ */
+ public boolean hasResult() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string result = 1;
+ */
+ public java.lang.String getResult() {
+ java.lang.Object ref = result_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ result_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string result = 1;
+ */
+ public com.google.protobuf.ByteString
+ getResultBytes() {
+ java.lang.Object ref = result_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ result_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ result_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasResult()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getResultBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getResultBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse other = (org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse) obj;
+
+ boolean result = true;
+ result = result && (hasResult() == other.hasResult());
+ if (hasResult()) {
+ result = result && getResult()
+ .equals(other.getResult());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasResult()) {
+ hash = (37 * hash) + RESULT_FIELD_NUMBER;
+ hash = (53 * hash) + getResult().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code CoprocessorTaskGeneratorResponse}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.internal_static_CoprocessorTaskGeneratorResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.internal_static_CoprocessorTaskGeneratorResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse.class, org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ result_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.internal_static_CoprocessorTaskGeneratorResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse build() {
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse buildPartial() {
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse result = new org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.result_ = result_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse other) {
+ if (other == org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse.getDefaultInstance()) return this;
+ if (other.hasResult()) {
+ bitField0_ |= 0x00000001;
+ result_ = other.result_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasResult()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string result = 1;
+ private java.lang.Object result_ = "";
+ /**
+ * required string result = 1;
+ */
+ public boolean hasResult() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string result = 1;
+ */
+ public java.lang.String getResult() {
+ java.lang.Object ref = result_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ result_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string result = 1;
+ */
+ public com.google.protobuf.ByteString
+ getResultBytes() {
+ java.lang.Object ref = result_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ result_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string result = 1;
+ */
+ public Builder setResult(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ result_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string result = 1;
+ */
+ public Builder clearResult() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ result_ = getDefaultInstance().getResult();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string result = 1;
+ */
+ public Builder setResultBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ result_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:CoprocessorTaskGeneratorResponse)
+ }
+
+ static {
+ defaultInstance = new CoprocessorTaskGeneratorResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:CoprocessorTaskGeneratorResponse)
+ }
+
+ /**
+ * Protobuf service {@code CoprocessorTaskGeneratorService}
+ */
+ public static abstract class CoprocessorTaskGeneratorService
+ implements com.google.protobuf.Service {
+ protected CoprocessorTaskGeneratorService() {}
+
+ public interface Interface {
+ /**
+ * rpc Generate(.CoprocessorTaskGeneratorRequest) returns (.CoprocessorTaskGeneratorResponse);
+ */
+ public abstract void generate(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest request,
+ com.google.protobuf.RpcCallback done);
+
+ }
+
+ public static com.google.protobuf.Service newReflectiveService(
+ final Interface impl) {
+ return new CoprocessorTaskGeneratorService() {
+ @java.lang.Override
+ public void generate(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest request,
+ com.google.protobuf.RpcCallback done) {
+ impl.generate(controller, request, done);
+ }
+
+ };
+ }
+
+ public static com.google.protobuf.BlockingService
+ newReflectiveBlockingService(final BlockingInterface impl) {
+ return new com.google.protobuf.BlockingService() {
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final com.google.protobuf.Message callBlockingMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request)
+ throws com.google.protobuf.ServiceException {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callBlockingMethod() given method descriptor for " +
+ "wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return impl.generate(controller, (org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest)request);
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ };
+ }
+
+ /**
+ * rpc Generate(.CoprocessorTaskGeneratorRequest) returns (.CoprocessorTaskGeneratorResponse);
+ */
+ public abstract void generate(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest request,
+ com.google.protobuf.RpcCallback done);
+
+ public static final
+ com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.getDescriptor().getServices().get(0);
+ }
+ public final com.google.protobuf.Descriptors.ServiceDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public final void callMethod(
+ com.google.protobuf.Descriptors.MethodDescriptor method,
+ com.google.protobuf.RpcController controller,
+ com.google.protobuf.Message request,
+ com.google.protobuf.RpcCallback<
+ com.google.protobuf.Message> done) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.callMethod() given method descriptor for wrong " +
+ "service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ this.generate(controller, (org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getRequestPrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getRequestPrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public final com.google.protobuf.Message
+ getResponsePrototype(
+ com.google.protobuf.Descriptors.MethodDescriptor method) {
+ if (method.getService() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "Service.getResponsePrototype() given method " +
+ "descriptor for wrong service type.");
+ }
+ switch(method.getIndex()) {
+ case 0:
+ return org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse.getDefaultInstance();
+ default:
+ throw new java.lang.AssertionError("Can't get here.");
+ }
+ }
+
+ public static Stub newStub(
+ com.google.protobuf.RpcChannel channel) {
+ return new Stub(channel);
+ }
+
+ public static final class Stub extends org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorService implements Interface {
+ private Stub(com.google.protobuf.RpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.RpcChannel channel;
+
+ public com.google.protobuf.RpcChannel getChannel() {
+ return channel;
+ }
+
+ public void generate(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest request,
+ com.google.protobuf.RpcCallback done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse.class,
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse.getDefaultInstance()));
+ }
+ }
+
+ public static BlockingInterface newBlockingStub(
+ com.google.protobuf.BlockingRpcChannel channel) {
+ return new BlockingStub(channel);
+ }
+
+ public interface BlockingInterface {
+ public org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse generate(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest request)
+ throws com.google.protobuf.ServiceException;
+ }
+
+ private static final class BlockingStub implements BlockingInterface {
+ private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
+ this.channel = channel;
+ }
+
+ private final com.google.protobuf.BlockingRpcChannel channel;
+
+ public org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse generate(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(0),
+ controller,
+ request,
+ org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse.getDefaultInstance());
+ }
+
+ }
+
+ // @@protoc_insertion_point(class_scope:CoprocessorTaskGeneratorService)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_CoprocessorTaskGeneratorRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_CoprocessorTaskGeneratorRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_CoprocessorTaskGeneratorResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_CoprocessorTaskGeneratorResponse_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\036CoprocessorTaskGenerator.proto\"!\n\037Copr" +
+ "ocessorTaskGeneratorRequest\"2\n Coprocess" +
+ "orTaskGeneratorResponse\022\016\n\006result\030\001 \002(\t2" +
+ "r\n\037CoprocessorTaskGeneratorService\022O\n\010Ge" +
+ "nerate\022 .CoprocessorTaskGeneratorRequest" +
+ "\032!.CoprocessorTaskGeneratorResponseB^\n6o" +
+ "rg.apache.hadoop.hbase.coprocessor.proto" +
+ "buf.generatedB\036CoprocessorTaskGeneratorP" +
+ "rotos\210\001\001\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_CoprocessorTaskGeneratorRequest_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_CoprocessorTaskGeneratorRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_CoprocessorTaskGeneratorRequest_descriptor,
+ new java.lang.String[] { });
+ internal_static_CoprocessorTaskGeneratorResponse_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_CoprocessorTaskGeneratorResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_CoprocessorTaskGeneratorResponse_descriptor,
+ new java.lang.String[] { "Result", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/CoprocessorTaskGeneratorEndpoint.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/CoprocessorTaskGeneratorEndpoint.java (revision 0)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/CoprocessorTaskGeneratorEndpoint.java (revision 23075)
@@ -0,0 +1,94 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.coprocessor;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorRequest;
+import org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorResponse;
+import org.apache.hadoop.hbase.coprocessor.protobuf.generated.CoprocessorTaskGeneratorProtos.CoprocessorTaskGeneratorService;
+import org.apache.hadoop.hbase.monitoring.MonitoredCoprocessorTask;
+import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
+
+/**
+ * Generates the coprocessor tasks on the server side.
+ */
+public class CoprocessorTaskGeneratorEndpoint extends CoprocessorTaskGeneratorService implements
+ Coprocessor, CoprocessorService {
+
+ private static final Log LOG = LogFactory.getLog(CoprocessorTaskGeneratorEndpoint.class);
+
+ @Override
+ public Service getService() {
+ return this;
+ }
+
+ @Override
+ public void start(CoprocessorEnvironment env) throws IOException {
+ // nothing to do
+ }
+
+ @Override
+ public void stop(CoprocessorEnvironment env) throws IOException {
+ // nothing to do
+ }
+
+ @Override
+ public void generate(RpcController controller, CoprocessorTaskGeneratorRequest request,
+ RpcCallback done) {
+ // generate 6 tasks
+ // 1. 3 tasks are defined as running, the other 3 are defined as waiting.
+ // 2. 3 tasks are defined for table test1, 3 for table test2
+ // 3. 3 tasks are defined for service1, and 3 for service2
+ // 4. 2 tasks are defined for region1, and 4 for region2 (NOTE(review): code below creates 2/4, not 3/3 — confirm the intended split)
+ try {
+ InetAddress localhost = InetAddress.getLocalHost();
+ MonitoredCoprocessorTask task1 = TaskMonitor.get()
+ .createCoprocessorTaskStatus(null, localhost, "service2", "test1", "region1");
+ task1.resume("running");
+ TaskMonitor.get()
+ .createCoprocessorTaskStatus(null, localhost, "service2", "test1", "region1");
+ TaskMonitor.get()
+ .createCoprocessorTaskStatus(null, localhost, "service1", "test1", "region2");
+ TaskMonitor.get().createCoprocessorTaskStatus(null,
+ localhost, "service1", "test2", "region2");
+ MonitoredCoprocessorTask task5 = TaskMonitor.get().createCoprocessorTaskStatus(null,
+ localhost, "service1", "test2", "region2");
+ task5.resume("running");
+ MonitoredCoprocessorTask task6 = TaskMonitor.get().createCoprocessorTaskStatus(null,
+ localhost, "service2", "test2", "region2");
+ task6.resume("running");
+ } catch (UnknownHostException e) {
+ LOG.error("Fail to get the local host", e);
+ }
+ done.run(CoprocessorTaskGeneratorResponse.newBuilder().setResult("done").build());
+ }
+
+}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CoprocessorTaskMonitorServer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CoprocessorTaskMonitorServer.java (revision 0)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CoprocessorTaskMonitorServer.java (revision 23075)
@@ -0,0 +1,382 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
+import org.apache.hadoop.hbase.ipc.RequestContext;
+import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
+import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+import org.apache.hadoop.hbase.monitoring.MonitoredCoprocessorTask;
+import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorService;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Sleeper;
+import org.apache.hadoop.hbase.util.Strings;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.net.DNS;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+/**
+ * The coprocessor task monitor server.
+ *
+ * Users could get the state of all the coprocessor executions in the server side by the
+ * CoprocessorTaskMonitorServer.
+ */
+public class CoprocessorTaskMonitorServer implements
+ CoprocessorTaskMonitorService.BlockingInterface, Server, Runnable {
+
+  public static final Log LOG = LogFactory.getLog(CoprocessorTaskMonitorServer.class);
+  protected final Configuration conf;
+  // Server to handle client requests. Default access so can be accessed by
+  // unit tests.
+  RpcServerInterface rpcServer;
+  // Set by stop(String); polled by the main loop in run()/loop().
+  protected volatile boolean stopped = false;
+  // Check if we should stop every second.
+  private Sleeper sleeper;
+  private ServerName serverName;
+  // Set by abort(); reported through isAborted().
+  protected volatile boolean abortRequested;
+  private ZooKeeperWatcher zooKeeper;
+  // Sleep interval (ms) of the main loop; from hbase.coprocessor.task.monitor.msginterval.
+  private final int msgInterval;
+
+  /**
+   * Builds the monitor server: resolves the bind address from configuration, starts no
+   * threads yet (run() does that), and wires up the RPC server and a ZooKeeper watcher.
+   *
+   * @param conf the server configuration
+   * @throws IOException if the RPC server or ZooKeeper watcher cannot be created
+   */
+  public CoprocessorTaskMonitorServer(Configuration conf) throws IOException {
+    this.conf = conf;
+    this.msgInterval = conf.getInt("hbase.coprocessor.task.monitor.msginterval", 3000);
+    this.abortRequested = false;
+    this.sleeper = new Sleeper(this.msgInterval, this);
+
+    // Resolve the hostname the same way the region server does (DNS interface/nameserver).
+    String dnsInf = conf.get("hbase.regionserver.dns.interface", "default");
+    String dnsNs = conf.get("hbase.regionserver.dns.nameserver", "default");
+    // Server to handle client requests.
+    String hostname = conf.get("hbase.coprocessor.task.monitor.ipc.address",
+        Strings.domainNamePointerToHostName(DNS.getDefaultHost(dnsInf, dnsNs)));
+    int port = conf.getInt(HConstants.COPROCESSOR_TASK_MONITOR_PORT,
+        HConstants.DEFAULT_COPROCESSOR_TASK_MONITOR_PORT);
+    // Test that the hostname is reachable
+    InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
+    if (initialIsa.getAddress() == null) {
+      throw new IllegalArgumentException("Failed resolve of hostname " + initialIsa);
+    }
+    String name = "coprocessorTaskMonitorServer/" + initialIsa.toString();
+    int numHandlers = conf.getInt("hbase.coprocessor.task.monitor.handler.count", 1);
+    this.rpcServer = new RpcServer(this, name, getServices(), initialIsa, conf,
+        new FifoRpcScheduler(conf, numHandlers));
+    // Use the actual bound address (port may have been ephemeral) for the server name.
+    InetSocketAddress isa = this.rpcServer.getListenerAddress();
+    this.serverName = ServerName.valueOf(isa.getHostName(), isa.getPort(),
+        System.currentTimeMillis());
+    this.zooKeeper = new ZooKeeperWatcher(conf, "CoprocessorTaskMonitorServer:" + isa.getPort(),
+        this);
+  }
+
+  /**
+   * @return list of blocking services and their security info classes that this server supports
+   */
+  private List<BlockingServiceAndInterface> getServices() {
+    // Only one service is registered, so size the list accordingly.
+    List<BlockingServiceAndInterface> bssi = new ArrayList<BlockingServiceAndInterface>(1);
+    bssi.add(new BlockingServiceAndInterface(
+        CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorService
+            .newReflectiveBlockingService(this),
+        CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorService.BlockingInterface.class));
+    return bssi;
+  }
+
+  /**
+   * Requests shutdown of this server and wakes the main loop so it can exit promptly.
+   *
+   * @param why reason for stopping, recorded in the log
+   */
+  @Override
+  public void stop(String why) {
+    try {
+      stopped = true;
+      LOG.info("STOPPED: " + why);
+      // run() may currently be sleeping; wake it so it notices the stopped flag.
+      sleeper.skipSleepCycle();
+    } catch (Exception exp) {
+      LOG.warn("The coprocessor task monitor server did not stop", exp);
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   *
+   * @return true once stop(String) has been invoked.
+   */
+  @Override
+  public boolean isStopped() {
+    return this.stopped;
+  }
+
+  /**
+   * Aborts this server: logs the failure and then triggers a normal stop.
+   *
+   * @param why human-readable reason for the abort, possibly null
+   * @param e the Throwable that caused the abort, possibly null
+   */
+  @Override
+  public void abort(String why, Throwable e) {
+    String msg = "ABORTING coprocessor task monitor server " + this + ": " + why;
+    // Log the stack trace whenever a cause is available. The previous test on
+    // 'why' dropped the Throwable entirely when no reason string was supplied.
+    if (e != null) {
+      LOG.fatal(msg, e);
+    } else {
+      LOG.fatal(msg);
+    }
+    this.abortRequested = true;
+    stop(msg);
+  }
+
+  /**
+   * {@inheritDoc}
+   *
+   * @return true once abort(String, Throwable) has been invoked.
+   */
+  @Override
+  public boolean isAborted() {
+    return this.abortRequested;
+  }
+
+  /**
+   * Main thread body: starts the RPC server, then idles until stop() flips the flag.
+   * The RPC server is always shut down on the way out, even on an unhandled error.
+   */
+  @Override
+  public void run() {
+    try {
+      LOG.info("CoprocessorTaskMonitorServer is starting...");
+      rpcServer.start();
+      loop();
+    } catch (Throwable t) {
+      LOG.fatal("Unhandled exception. Starting shutdown the coprocessor task monitor server...", t);
+    } finally {
+      LOG.info("Stopping rpc server...");
+      if (rpcServer != null) {
+        rpcServer.stop();
+      }
+    }
+    LOG.info("CoprocessorTaskMonitorServer main thread exiting");
+  }
+
+  /** Sleeps in msgInterval increments until the stopped flag is raised. */
+  private void loop() {
+    while (!stopped) {
+      sleeper.sleep();
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public Configuration getConfiguration() {
+    return this.conf;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public ZooKeeperWatcher getZooKeeper() {
+    return this.zooKeeper;
+  }
+
+  /**
+   * {@inheritDoc}
+   *
+   * Always returns null: this standalone monitor server keeps no catalog tracker.
+   */
+  @Override
+  public CatalogTracker getCatalogTracker() {
+    return null;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public ServerName getServerName() {
+    return this.serverName;
+  }
+
+  /**
+   * Returns the state of all non-expired coprocessor tasks, filtered by the categories
+   * supplied in the request plus an implicit ADDRESS category built from the caller's
+   * remote address — i.e. a caller only sees tasks originating from its own address.
+   */
+  @Override
+  public CoprocessorTaskMonitorResponse getCoprocessorTaskState(RpcController controller,
+      CoprocessorTaskMonitorRequest request) throws ServiceException {
+    RequestContext rc = RequestContext.get();
+    List<MonitoredCoprocessorTask> tasks = TaskMonitor.get().getCoprocessorTasks();
+    CoprocessorTaskMonitorResponse.Builder builder = CoprocessorTaskMonitorResponse.newBuilder();
+    if (tasks != null && !tasks.isEmpty()) {
+      String address = rc.getRemoteAddress().getHostAddress();
+      CoprocessorTaskFilterCategory addressCategory = CoprocessorTaskFilterCategory.newBuilder()
+          .setType(CategoryType.ADDRESS).setValue(ByteString.copyFrom(Bytes.toBytes(address)))
+          .build();
+      List<CoprocessorTaskFilterCategory> categories = request.getCategoriesList();
+      List<CoprocessorTaskFilterCategory> allCategories =
+          new ArrayList<CoprocessorTaskFilterCategory>(
+              categories == null ? 1 : categories.size() + 1);
+      allCategories.add(addressCategory);
+      if (categories != null) {
+        allCategories.addAll(categories);
+      }
+      List<CoprocessorTaskMonitorResponseMember> members = convert(tasks);
+      members = filter(members, allCategories);
+      builder.addAllMembers(members);
+    }
+    return builder.build();
+  }
+
+  /**
+   * Converts the list of MonitoredCoprocessorTask to a list of
+   * CoprocessorTaskMonitorResponseMember protobuf messages.
+   *
+   * @param tasks the monitored tasks to convert; must not be null
+   * @return one response member per task, in the same order
+   */
+  private List<CoprocessorTaskMonitorResponseMember> convert(
+      List<MonitoredCoprocessorTask> tasks) {
+    List<CoprocessorTaskMonitorResponseMember> members =
+        new ArrayList<CoprocessorTaskMonitorResponseMember>(tasks.size());
+    for (MonitoredCoprocessorTask task : tasks) {
+      // Protobuf string fields reject null, so substitute "" for absent address/user.
+      CoprocessorTaskMonitorResponseMember member = CoprocessorTaskMonitorResponseMember
+          .newBuilder()
+          .setAddress(
+            task.getRemoteAddress() == null ? "" : task.getRemoteAddress().getHostAddress())
+          .setCoprocessorServiceName(task.getCoprocessorServiceName())
+          .setRegionName(task.getRegionName()).setState(task.getState().toString())
+          .setTableName(task.getTableName())
+          .setUserName(task.getUserName() == null ? "" : task.getUserName()).build();
+      members.add(member);
+    }
+    return members;
+  }
+
+  /**
+   * Removes from {@code members} every entry that fails one of the requested filter
+   * categories. Categories of the same type are OR-ed together; different types are
+   * AND-ed. An empty category list for a type imposes no restriction.
+   *
+   * @param members the candidate response members; filtered in place
+   * @param categories the filter categories to apply
+   * @return the filtered list (the same instance as {@code members})
+   */
+  private List<CoprocessorTaskMonitorResponseMember> filter(
+      List<CoprocessorTaskMonitorResponseMember> members,
+      List<CoprocessorTaskFilterCategory> categories) {
+    Map<CategoryType, List<CoprocessorTaskFilterCategory>> assemble = assemble(categories);
+    for (Iterator<CoprocessorTaskMonitorResponseMember> it = members.iterator(); it.hasNext();) {
+      CoprocessorTaskMonitorResponseMember member = it.next();
+      for (Entry<CategoryType, List<CoprocessorTaskFilterCategory>> entry : assemble.entrySet()) {
+        // Map each category type to the member field it constrains; the per-type
+        // matching loop itself is shared in matchesAny() instead of being repeated
+        // once per case as before.
+        String fieldValue;
+        switch (entry.getKey()) {
+        case TABLE_NAME:
+          fieldValue = member.getTableName();
+          break;
+        case USER_NAME:
+          fieldValue = member.getUserName();
+          break;
+        case COPROCESSOR_SERVICE_NAME:
+          fieldValue = member.getCoprocessorServiceName();
+          break;
+        case STATE:
+          fieldValue = member.getState();
+          break;
+        case REGION_NAME:
+          fieldValue = member.getRegionName();
+          break;
+        case ADDRESS:
+          fieldValue = member.getAddress();
+          break;
+        default:
+          // Unknown category types impose no restriction.
+          fieldValue = null;
+          break;
+        }
+        if (fieldValue != null && !matchesAny(entry.getValue(), fieldValue)) {
+          it.remove();
+          break;
+        }
+      }
+    }
+    return members;
+  }
+
+  /**
+   * @return true when {@code categories} is empty (no restriction for this type) or
+   *         when some category's value equals {@code fieldValue}
+   */
+  private boolean matchesAny(List<CoprocessorTaskFilterCategory> categories, String fieldValue) {
+    if (categories.isEmpty()) {
+      return true;
+    }
+    for (CoprocessorTaskFilterCategory category : categories) {
+      if (Bytes.toString(category.getValue().toByteArray()).equals(fieldValue)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Groups the supplied filter categories by their category type.
+   *
+   * @param categories the flat list of categories from the request
+   * @return a map from category type to all categories of that type
+   */
+  private Map<CategoryType, List<CoprocessorTaskFilterCategory>> assemble(
+      List<CoprocessorTaskFilterCategory> categories) {
+    Map<CategoryType, List<CoprocessorTaskFilterCategory>> assemble =
+        new HashMap<CategoryType, List<CoprocessorTaskFilterCategory>>();
+    for (CoprocessorTaskFilterCategory category : categories) {
+      List<CoprocessorTaskFilterCategory> values = assemble.get(category.getType());
+      if (values == null) {
+        values = new ArrayList<CoprocessorTaskFilterCategory>();
+        assemble.put(category.getType(), values);
+      }
+      values.add(category);
+    }
+    return assemble;
+  }
+}
\ No newline at end of file
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 22835)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 23075)
@@ -116,6 +116,7 @@
import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.ipc.PriorityFunction;
+import org.apache.hadoop.hbase.ipc.RequestContext;
import org.apache.hadoop.hbase.ipc.RpcCallContext;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -125,6 +126,8 @@
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.master.SplitLogManager;
import org.apache.hadoop.hbase.master.TableLockManager;
+import org.apache.hadoop.hbase.monitoring.MonitoredCoprocessorTask;
+import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
@@ -455,6 +458,8 @@
/** The health check chore. */
private HealthCheckChore healthCheckChore;
+ private CoprocessorTaskMonitorServer coprocessorTaskMonitorServer;
+
/** The nonce manager chore. */
private Chore nonceManagerChore;
@@ -670,8 +675,18 @@
this.distributedLogReplay = this.conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY,
HConstants.DEFAULT_DISTRIBUTED_LOG_REPLAY_CONFIG);
+
+ boolean isCoprocessorTaskMonitorServerEnabled = conf.getBoolean(
+ "hbase.coprocessor.task.monitor.server.enabled", false);
+ if (isCoprocessorTaskMonitorServerEnabled) {
+ coprocessorTaskMonitorServer = new CoprocessorTaskMonitorServer(conf);
+ }
}
+ CoprocessorTaskMonitorServer getCoprocessorTaskMonitorServer() {
+ return this.coprocessorTaskMonitorServer;
+ }
+
/**
* @return list of blocking services and their security info classes that this server supports
*/
@@ -869,6 +884,12 @@
*/
@Override
public void run() {
+ if (this.coprocessorTaskMonitorServer != null) {
+ // start the thread of the coprocessor task monitor server.
+ Thread coprocessorTaskMonitorThread = new Thread(this.coprocessorTaskMonitorServer);
+ coprocessorTaskMonitorThread.setName("Coprocessor task monitor");
+ coprocessorTaskMonitorThread.start();
+ }
try {
// Do pre-registration initializations; zookeeper, lease threads, etc.
preRegistrationInitialization();
@@ -1769,6 +1790,9 @@
LOG.info("STOPPED: " + msg);
// Wakes run() if it is sleeping
sleeper.skipSleepCycle();
+ if (coprocessorTaskMonitorServer != null) {
+ coprocessorTaskMonitorServer.stop("Stop the coprocessor task monitor server: " + msg);
+ }
} catch (IOException exp) {
LOG.warn("The region server did not stop", exp);
}
@@ -3302,13 +3326,22 @@
@Override
public CoprocessorServiceResponse execService(final RpcController controller,
final CoprocessorServiceRequest request) throws ServiceException {
+ MonitoredCoprocessorTask task = null;
try {
requestCount.increment();
+ RequestContext rc = RequestContext.get();
HRegion region = getRegion(request.getRegion());
+ task = TaskMonitor.get().createCoprocessorTaskStatus(
+ rc.getUser() == null ? null : rc.getUser().getName(), rc.getRemoteAddress(),
+ request.getCall().getServiceName(),
+ Bytes.toString(region.getRegionInfo().getTable().getName()),
+ Bytes.toString(region.getRegionName()));
+ task.resume("running");
// ignore the passed in controller (from the serialized call)
ServerRpcController execController = new ServerRpcController();
Message result = region.execService(execController, request.getCall());
if (execController.getFailedOn() != null) {
+ task.abort("error:" + execController.getFailedOn().getMessage());
throw execController.getFailedOn();
}
CoprocessorServiceResponse.Builder builder =
@@ -3318,8 +3351,12 @@
builder.setValue(
builder.getValueBuilder().setName(result.getClass().getName())
.setValue(result.toByteString()));
+ task.markComplete("completed");
return builder.build();
} catch (IOException ie) {
+ if (task != null) {
+ task.abort("error:" + ie.getMessage());
+ }
throw new ServiceException(ie);
}
}
@@ -4601,22 +4638,39 @@
List calls = request.getCallList();
Map> futures =
new TreeMap>(Bytes.BYTES_COMPARATOR);
- for(final BatchCoprocessorServiceCall call : calls) {
- Future future = pool.submit(new Callable(){
+ for (final BatchCoprocessorServiceCall call : calls) {
+ Future future = pool.submit(new Callable() {
@Override
public Void call() throws Exception {
- HRegion region = getRegion(call.getRegion());
- // ignore the passed in controller (from the serialized call)
- ServerRpcController execController = new ServerRpcController();
- Message result = region.execService(execController, call.getCall());
- if (execController.getFailedOn() != null) {
- throw execController.getFailedOn();
+ MonitoredCoprocessorTask task = null;
+ try {
+ RequestContext rc = RequestContext.get();
+ HRegion region = getRegion(call.getRegion());
+ task = TaskMonitor.get().createCoprocessorTaskStatus(
+ rc.getUser() == null ? null : rc.getUser().getName(), rc.getRemoteAddress(),
+ call.getCall().getServiceName(),
+ Bytes.toString(region.getRegionInfo().getTable().getName()),
+ Bytes.toString(region.getRegionName()));
+ task.resume("running");
+ // ignore the passed in controller (from the serialized call)
+ ServerRpcController execController = new ServerRpcController();
+ Message result = region.execService(execController, call.getCall());
+ if (execController.getFailedOn() != null) {
+ task.abort("error:" + execController.getFailedOn().getMessage());
+ throw execController.getFailedOn();
+ }
+ callback.update(region.getRegionName(), result);
+ task.markComplete("completed");
+ return null;
+ } catch (IOException ie) {
+ if (task != null) {
+ task.abort("error:" + ie.getMessage());
+ }
+ throw new ServiceException(ie);
}
- callback.update(region.getRegionName(), result);
- return null;
}
-
+
});
futures.put(call.getCall().getRow().toByteArray(), future);
}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredCoprocessorTask.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredCoprocessorTask.java (revision 0)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredCoprocessorTask.java (revision 23075)
@@ -0,0 +1,62 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.monitoring;
+
+import java.net.InetAddress;
+
+/**
+ * A MonitoredTask for a coprocessor execution.
+ */
+public interface MonitoredCoprocessorTask extends MonitoredTask {
+
+  /**
+   * Gets the user name.
+   *
+   * @return the user name of the caller; may be null when the request carried no user.
+   */
+  String getUserName();
+
+  /**
+   * Gets the remote address the coprocessor call came from.
+   *
+   * @return the remote address.
+   */
+  InetAddress getRemoteAddress();
+
+  /**
+   * Gets the table name the coprocessor call targets.
+   *
+   * @return the table name.
+   */
+  String getTableName();
+
+  /**
+   * Gets the coprocessor service name.
+   *
+   * @return the coprocessor service name
+   */
+  String getCoprocessorServiceName();
+
+  /**
+   * Gets the encoded name of the region the call runs against.
+   *
+   * @return the region name
+   */
+  String getRegionName();
+}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredCoprocessorTaskImpl.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredCoprocessorTaskImpl.java (revision 0)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredCoprocessorTaskImpl.java (revision 23075)
@@ -0,0 +1,97 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.monitoring;
+
+import java.net.InetAddress;
+
+
+/**
+ * The implementation of the MonitoredCoprocessorTask.
+ * It contains all the necessary information of a coprocessor execution.
+ */
+public class MonitoredCoprocessorTaskImpl extends MonitoredTaskImpl implements
+    MonitoredCoprocessorTask {
+
+  // Identity of the caller; may be null (TaskMonitor callers pass null users).
+  protected String userName;
+  // Address the coprocessor call originated from.
+  protected InetAddress remoteAddress;
+  // Name of the invoked coprocessor service.
+  protected String coprocessorServiceName;
+  // Region and table the call runs against.
+  protected String regionName;
+  protected String tableName;
+
+  public MonitoredCoprocessorTaskImpl(String userName, InetAddress remoteAddress,
+      String coprocessorServiceName, String tableName, String regionName) {
+    super();
+    this.userName = userName;
+    this.remoteAddress = remoteAddress;
+    this.coprocessorServiceName = coprocessorServiceName;
+    this.tableName = tableName;
+    this.regionName = regionName;
+    // in this implementation, WAITING indicates that the handler is not yet
+    // actively executing the CP call
+    setState(State.WAITING);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getUserName() {
+    return this.userName;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public InetAddress getRemoteAddress() {
+    return this.remoteAddress;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getTableName() {
+    return this.tableName;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getCoprocessorServiceName() {
+    return this.coprocessorServiceName;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getRegionName() {
+    return this.regionName;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public synchronized MonitoredCoprocessorTaskImpl clone() {
+    return (MonitoredCoprocessorTaskImpl) super.clone();
+  }
+}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java (revision 22835)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java (revision 23075)
@@ -23,6 +23,7 @@
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
+import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
@@ -54,6 +55,8 @@
private List tasks =
Lists.newArrayList();
+  // Live coprocessor tasks paired with weak references used for leak detection.
+  private List<TaskAndWeakRefPair> coprocessorTasks = Lists.newArrayList();
+
/**
* Get singleton instance.
* TODO this would be better off scoped to a single daemon
@@ -93,6 +96,64 @@
return proxy;
}
+  /**
+   * Creates and registers a monitored task for a single coprocessor invocation.
+   *
+   * @param userName the caller's user name, possibly null
+   * @param remoteAddress the caller's remote address
+   * @param coprocessorServiceName the invoked coprocessor service
+   * @param tableName the table the target region belongs to
+   * @param regionName the region the call runs against
+   * @return a proxy to the task; its reachability lets the monitor detect leaks
+   */
+  public synchronized MonitoredCoprocessorTask createCoprocessorTaskStatus(String userName,
+      InetAddress remoteAddress, String coprocessorServiceName, String tableName,
+      String regionName) {
+    MonitoredCoprocessorTask stat = new MonitoredCoprocessorTaskImpl(userName, remoteAddress,
+        coprocessorServiceName, tableName, regionName);
+    MonitoredCoprocessorTask proxy = (MonitoredCoprocessorTask) Proxy.newProxyInstance(stat
+        .getClass().getClassLoader(), new Class<?>[] { MonitoredCoprocessorTask.class },
+        new PassthroughInvocationHandler<MonitoredCoprocessorTask>(stat));
+    TaskAndWeakRefPair pair = new TaskAndWeakRefPair(stat, proxy);
+    coprocessorTasks.add(pair);
+    return proxy;
+  }
+
+  /**
+   * Drops coprocessor tasks that are finished/leaked and can be purged, then caps the
+   * list at MAX_TASKS, keeping the most recently added entries.
+   */
+  private synchronized void purgeExpiredCpTasks() {
+    int size = 0;
+
+    for (Iterator<TaskAndWeakRefPair> it = coprocessorTasks.iterator(); it.hasNext();) {
+      TaskAndWeakRefPair pair = it.next();
+      MonitoredTask stat = pair.get();
+
+      if (pair.isDead()) {
+        // The class who constructed this leaked it. So we can
+        // assume it's done.
+        if (stat.getState() == MonitoredTaskImpl.State.RUNNING) {
+          LOG.warn("Status " + stat + " appears to have been leaked");
+          stat.cleanup();
+        }
+      }
+
+      if (canPurge(stat)) {
+        it.remove();
+      } else {
+        size++;
+      }
+    }
+
+    if (size > MAX_TASKS) {
+      LOG.warn("Too many actions in action monitor! Purging some.");
+      // BUGFIX: trim the coprocessor task list, not the unrelated 'tasks' list
+      // (the previous line was copy-pasted from purgeExpiredTasks()).
+      coprocessorTasks = coprocessorTasks.subList(size - MAX_TASKS, size);
+    }
+  }
+
+  /**
+   * Produces a list containing copies of the current state of all non-expired
+   * coprocessor-related MonitoredTasks handled by this TaskMonitor.
+   *
+   * @return all the existing coprocessor tasks.
+   */
+  public synchronized List<MonitoredCoprocessorTask> getCoprocessorTasks() {
+    purgeExpiredCpTasks();
+    ArrayList<MonitoredCoprocessorTask> ret =
+        Lists.newArrayListWithCapacity(coprocessorTasks.size());
+    for (TaskAndWeakRefPair pair : coprocessorTasks) {
+      MonitoredCoprocessorTask t = (MonitoredCoprocessorTask) pair.get();
+      // Return snapshots so callers never see concurrent mutation of live tasks.
+      ret.add((MonitoredCoprocessorTask) t.clone());
+    }
+    return ret;
+  }
+
private synchronized void purgeExpiredTasks() {
int size = 0;
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java (revision 22835)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java (revision 23075)
@@ -41,6 +41,7 @@
import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorService.BlockingInterface;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -385,4 +386,9 @@
public NonceGenerator getNonceGenerator() {
return null; // don't use nonces for coprocessor connection
}
+
+  /**
+   * {@inheritDoc}
+   *
+   * Simply delegates to the wrapped connection.
+   */
+  @Override
+  public BlockingInterface getCoprocessorTaskMonitor(ServerName serverName) throws IOException {
+    return delegate.getCoprocessorTaskMonitor(serverName);
+  }
}
\ No newline at end of file
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java (revision 22835)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java (revision 23075)
@@ -75,6 +75,8 @@
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorService;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorService.BlockingInterface;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse;
@@ -603,6 +605,11 @@
// Client rpc instance.
private RpcClient rpcClient;
+    // Cached coprocessor-task-monitor stubs keyed by service-name + host:port. Values
+    // are CoprocessorTaskMonitorService.BlockingInterface instances (the getter casts).
+    private final Map<String, Object> coprocessorMonitorServiceStubs =
+        new ConcurrentHashMap<String, Object>();
+    // Interned per-key lock objects (key -> key) guarding stub creation.
+    private final ConcurrentHashMap<String, String> monitorsLock =
+        new ConcurrentHashMap<String, String>();
+
/**
* Map of table to table {@link HRegionLocation}s.
*/
@@ -2661,6 +2668,27 @@
throws IOException {
return getHTableDescriptor(TableName.valueOf(tableName));
}
+
+ @Override
+ public BlockingInterface getCoprocessorTaskMonitor(ServerName serverName) throws IOException {
+ if (isDeadServer(serverName)) {
+ throw new RegionServerStoppedException(serverName + " is dead.");
+ }
+ String key = getStubKey(CoprocessorTaskMonitorService.BlockingInterface.class.getName(),
+ serverName.getHostAndPort());
+ this.monitorsLock.putIfAbsent(key, key);
+ CoprocessorTaskMonitorService.BlockingInterface stub = null;
+ synchronized (this.monitorsLock.get(key)) {
+ stub = (CoprocessorTaskMonitorService.BlockingInterface)this.coprocessorMonitorServiceStubs.get(key);
+ if (stub == null) {
+ BlockingRpcChannel channel = this.rpcClient.createBlockingRpcChannel(serverName,
+ user, this.rpcTimeout);
+ stub = CoprocessorTaskMonitorService.newBlockingStub(channel);
+ this.coprocessorMonitorServiceStubs.put(key, stub);
+ }
+ }
+ return stub;
+ }
}
/**
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorTaskMonitorClient.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorTaskMonitorClient.java (revision 0)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorTaskMonitorClient.java (revision 23075)
@@ -0,0 +1,206 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskFilterCategory.CategoryType;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorRequest;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponse;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorResponseMember;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorService;
+import org.apache.hadoop.hbase.util.Threads;
+
+import com.google.protobuf.ByteString;
+
+/**
+ * Used to communicate with the CoprocessorTaskMonitorServer.
+ *
+ * Get the states of the coprocessor executions from the server side.
+ */
+public class CoprocessorTaskMonitorClient {
+
+  private static final Log LOG = LogFactory.getLog(CoprocessorTaskMonitorClient.class);
+  /** Shared cluster connection used to reach each server's task monitor service. */
+  private final HConnection connection;
+  /** Set once {@link #close()} has run; makes close() idempotent. */
+  private boolean closed;
+  /** Table whose coprocessor tasks are monitored; every request is scoped to it. */
+  private final TableName tableName;
+  /** Used only to discover which region servers currently host the table. */
+  private final HTable table;
+  /** Port the CoprocessorTaskMonitorServer listens on (same on every server). */
+  private final int port;
+  /** Executor fanning the monitor RPCs out to all hosting servers in parallel. */
+  private final ExecutorService pool;
+
+  /**
+   * Creates a monitor client bound to the given table.
+   *
+   * @param conf configuration; supplies the monitor port and client thread limits
+   * @param tableName table whose coprocessor task states will be queried
+   * @throws IOException if the cluster connection or the table cannot be created
+   */
+  @SuppressWarnings("deprecation")
+  public CoprocessorTaskMonitorClient(Configuration conf, TableName tableName) throws IOException {
+    this.connection = HConnectionManager.getConnection(conf);
+    this.port = conf.getInt(HConstants.COPROCESSOR_TASK_MONITOR_PORT,
+        HConstants.DEFAULT_COPROCESSOR_TASK_MONITOR_PORT);
+    this.tableName = tableName;
+    this.closed = false;
+    int maxThreads = conf.getInt("hbase.coprocessor.task.monitor.client.threads.max",
+        Integer.MAX_VALUE);
+    if (maxThreads <= 0) {
+      maxThreads = 1;
+    }
+    long keepAliveTime = conf.getLong("hbase.htable.threads.keepalivetime", 60);
+    // SynchronousQueue plus allowCoreThreadTimeOut mirrors HTable's pool: threads are
+    // created on demand, one per concurrent server RPC, and expire when idle.
+    this.pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS,
+        new SynchronousQueue<Runnable>(),
+        Threads.newDaemonThreadFactory("hbase-coprocessor.task.monitor.client"));
+    ((ThreadPoolExecutor) this.pool).allowCoreThreadTimeOut(true);
+    this.table = new HTable(tableName, connection, pool);
+  }
+
+  /**
+   * Releases the table, the shared connection and the thread pool. Idempotent.
+   *
+   * @throws IOException if closing the table or the connection fails
+   */
+  public void close() throws IOException {
+    if (closed) {
+      return;
+    }
+    // Close the table BEFORE the connection: HTable.close() may still use the
+    // connection, so tearing the connection down first would make it fail. The
+    // nested finally blocks guarantee the connection and pool are released even
+    // when table.close() throws.
+    try {
+      if (table != null) {
+        table.close();
+      }
+    } finally {
+      try {
+        if (connection != null) {
+          connection.close();
+        }
+      } finally {
+        this.pool.shutdown();
+        closed = true;
+      }
+    }
+  }
+
+  /**
+   * Gets the coprocessor task states for this client's table from every region
+   * server currently hosting a region of it, merging the per-server responses
+   * into one response.
+   *
+   * @param categories optional extra filters; must not contain TABLE_NAME or
+   *          ADDRESS (the table-name filter is added automatically)
+   * @return the merged response with the members reported by all servers
+   * @throws IOException if any server RPC fails or the calling thread is interrupted
+   */
+  public CoprocessorTaskMonitorResponse getCoprocessorTaskState(
+      CoprocessorTaskFilterCategory... categories) throws IOException {
+    final List<CoprocessorTaskFilterCategory> sentCategories = resolve(categories);
+    CoprocessorTaskMonitorRequest.Builder builder = CoprocessorTaskMonitorRequest.newBuilder();
+    builder.addAllCategories(sentCategories);
+    final CoprocessorTaskMonitorRequest request = builder.build();
+    Set<ServerName> sns = getServerNames();
+    Map<String, Future<CoprocessorTaskMonitorResponse>> futures =
+        new HashMap<String, Future<CoprocessorTaskMonitorResponse>>();
+    for (ServerName sn : sns) {
+      // Rebuild the server name with the monitor port: the task monitor server
+      // listens on its own port, not on the region server's RPC port.
+      ServerName newSn = ServerName.valueOf(sn.getHostname(), port, System.currentTimeMillis());
+      final CoprocessorTaskMonitorService.BlockingInterface service = this.connection
+          .getCoprocessorTaskMonitor(newSn);
+      Future<CoprocessorTaskMonitorResponse> future = pool
+          .submit(new Callable<CoprocessorTaskMonitorResponse>() {
+
+            @Override
+            public CoprocessorTaskMonitorResponse call() throws Exception {
+              return service.getCoprocessorTaskState(null, request);
+            }
+          });
+      futures.put(newSn.getHostAndPort(), future);
+    }
+    CoprocessorTaskMonitorResponse.Builder responseBuilder = CoprocessorTaskMonitorResponse
+        .newBuilder();
+    for (Map.Entry<String, Future<CoprocessorTaskMonitorResponse>> entry : futures.entrySet()) {
+      try {
+        CoprocessorTaskMonitorResponse response = entry.getValue().get();
+        List<CoprocessorTaskMonitorResponseMember> members = response.getMembersList();
+        if (members != null && !members.isEmpty()) {
+          responseBuilder.addAllMembers(members);
+        }
+      } catch (InterruptedException e) {
+        // Restore the interrupt flag before converting to IOException.
+        Thread.currentThread().interrupt();
+        LOG.error("Interrupted executing for the region server " + entry.getKey(), e);
+        throw new IOException(e);
+      } catch (ExecutionException e) {
+        LOG.error(
+            "Error to get the coprocessor task state for the region server " + entry.getKey(), e);
+        throw new IOException(e);
+      }
+    }
+    return responseBuilder.build();
+  }
+
+  /**
+   * Prepends the mandatory table-name filter to the user-supplied categories.
+   *
+   * Guarantees the table name is the FIRST filter category in the returned list.
+   *
+   * @param categories user-supplied filters; validated first, may be null
+   * @return a new list: the table-name filter followed by the given categories
+   */
+  private List<CoprocessorTaskFilterCategory> resolve(
+      CoprocessorTaskFilterCategory... categories) {
+    validateCategories(categories);
+    CoprocessorTaskFilterCategory tableNameCategory = CoprocessorTaskFilterCategory.newBuilder()
+        .setType(CategoryType.TABLE_NAME).setValue(ByteString.copyFrom(tableName.getName()))
+        .build();
+    List<CoprocessorTaskFilterCategory> sentCategories =
+        new ArrayList<CoprocessorTaskFilterCategory>(
+            1 + (categories == null ? 0 : categories.length));
+    sentCategories.add(tableNameCategory);
+    if (categories != null) {
+      for (CoprocessorTaskFilterCategory category : categories) {
+        sentCategories.add(category);
+      }
+    }
+    return sentCategories;
+  }
+
+  /**
+   * Validates the user-supplied categories.
+   *
+   * CategoryType.TABLE_NAME and CategoryType.ADDRESS are reserved: the table-name
+   * filter is added by this client and the address is implied by fanning out to
+   * every hosting server.
+   *
+   * @param categories filters to check; null means "no extra filters" and is allowed
+   * @throws IllegalArgumentException if a TABLE_NAME or ADDRESS filter is present
+   */
+  private void validateCategories(CoprocessorTaskFilterCategory... categories) {
+    if (categories == null) {
+      return;
+    }
+    for (CoprocessorTaskFilterCategory category : categories) {
+      if (category.getType() == CategoryType.TABLE_NAME
+          || category.getType() == CategoryType.ADDRESS) {
+        throw new IllegalArgumentException(
+            "The CategoryType.TABLE_NAME and CategoryType.ADDRESS are not allowed in the categories");
+      }
+    }
+  }
+
+  /**
+   * Looks up the distinct region servers currently hosting regions of the table.
+   *
+   * @return the set of hosting servers (carrying region-server ports, not monitor ports)
+   * @throws IOException if the region locations cannot be fetched
+   */
+  private Set<ServerName> getServerNames() throws IOException {
+    Set<ServerName> serverNames = new TreeSet<ServerName>();
+    NavigableMap<HRegionInfo, ServerName> regions = table.getRegionLocations();
+    for (Entry<HRegionInfo, ServerName> entry : regions.entrySet()) {
+      serverNames.add(entry.getValue());
+    }
+    return serverNames;
+  }
+}
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java (revision 22835)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java (revision 23075)
@@ -37,6 +37,7 @@
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
+import org.apache.hadoop.hbase.protobuf.generated.CoprocessorTaskMonitorProtos.CoprocessorTaskMonitorService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
/**
@@ -520,4 +521,13 @@
* @return Nonce generator for this HConnection; may be null if disabled in configuration.
*/
public NonceGenerator getNonceGenerator();
+
+  /**
+   * Gets a blocking stub for the CoprocessorTaskMonitorServer running on the given
+   * server, creating the underlying connection if necessary.
+   *
+   * @param serverName the server (host and coprocessor-task-monitor port) to contact
+   * @return a blocking stub bound to that server's coprocessor task monitor service
+   * @throws IOException if the server is dead or the connection cannot be established
+   */
+ CoprocessorTaskMonitorService.BlockingInterface getCoprocessorTaskMonitor(ServerName serverName)
+ throws IOException;
}
\ No newline at end of file
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (revision 22835)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (revision 23075)
@@ -135,6 +135,12 @@
/** default host address */
public static final String DEFAULT_HOST = "0.0.0.0";
+ /** Parameter name for the port the coprocessor task monitor server listens on. */
+ public static final String COPROCESSOR_TASK_MONITOR_PORT = "hbase.coprocessor.task.monitor.port";
+
+ /** Default port the coprocessor task monitor server listens on. */
+ public static final int DEFAULT_COPROCESSOR_TASK_MONITOR_PORT = 60050;
+
/** Parameter name for port master listens on. */
public static final String MASTER_PORT = "hbase.master.port";