From f051d4565fa2cfa5337133494ce1e5ea9da1093a Mon Sep 17 00:00:00 2001
From: chenheng
Date: Thu, 29 Oct 2015 15:52:33 +0800
Subject: [PATCH] HBASE-11393 Replication TableCfs should be a PB object rather
 than a string

---
 .../hbase/client/replication/ReplicationAdmin.java |   12 +-
 .../hbase/client/replication/TableCFsHelper.java   |   83 ++
 .../hadoop/hbase/replication/ReplicationPeer.java  |    3 +-
 .../hbase/replication/ReplicationPeerZKImpl.java   |   13 +-
 .../hadoop/hbase/replication/ReplicationPeers.java |   12 +-
 .../hbase/replication/ReplicationPeersZKImpl.java  |   23 +-
 .../hbase/protobuf/generated/TableCFsProto.java    | 1472 ++++++++++++++++++++
 hbase-protocol/src/main/protobuf/TableCFs.proto    |   34 +
 .../hbase/replication/TableCfWALEntryFilter.java   |    3 +-
 .../TestReplicationWALEntryFilters.java            |    9 +-
 10 files changed, 1634 insertions(+), 30 deletions(-)
 create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFsHelper.java
 create mode 100644 hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableCFsProto.java
 create mode 100644 hbase-protocol/src/main/protobuf/TableCFs.proto

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 8bd1267..9f38cb0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -186,7 +186,7 @@ public class ReplicationAdmin implements Closeable {
   public void addPeer(String id, String clusterKey, String tableCFs)
     throws ReplicationException {
     this.replicationPeers.addPeer(id,
-      new ReplicationPeerConfig().setClusterKey(clusterKey), tableCFs);
+      new ReplicationPeerConfig().setClusterKey(clusterKey), TableCFsHelper.convert(tableCFs));
   }
 
   /**
@@ -200,7 +200,7 @@
    */
   public void addPeer(String id, ReplicationPeerConfig peerConfig,
       Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException {
-    this.replicationPeers.addPeer(id, peerConfig, getTableCfsStr(tableCfs));
+    this.replicationPeers.addPeer(id, peerConfig, TableCFsHelper.convert(tableCfs));
   }
 
   public static Map<TableName, List<String>> parseTableCFsFromConfig(String tableCFsConfig) {
@@ -273,6 +273,8 @@
     return tableCfsStr;
   }
 
+
+
   /**
    * Removes a peer cluster and stops the replication to it.
    * @param id a short name that identifies the cluster
@@ -334,7 +336,7 @@
    * @param id a short name that identifies the cluster
    */
   public String getPeerTableCFs(String id) throws ReplicationException {
-    return this.replicationPeers.getPeerTableCFsConfig(id);
+    return TableCFsHelper.convert(this.replicationPeers.getPeerTableCFsConfig(id));
   }
 
   /**
@@ -344,7 +346,7 @@
    */
   @Deprecated
   public void setPeerTableCFs(String id, String tableCFs) throws ReplicationException {
-    this.replicationPeers.setPeerTableCFsConfig(id, tableCFs);
+    this.replicationPeers.setPeerTableCFsConfig(id, TableCFsHelper.convert(tableCFs));
   }
 
   /**
@@ -462,7 +464,7 @@
    */
   public void setPeerTableCFs(String id,
       Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException {
-    this.replicationPeers.setPeerTableCFsConfig(id, getTableCfsStr(tableCfs));
+    this.replicationPeers.setPeerTableCFsConfig(id, TableCFsHelper.convert(tableCfs));
   }
 
   /**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFsHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFsHelper.java
new file mode 100644
index 0000000..154850e
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFsHelper.java
@@ -0,0 +1,83 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client.replication;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Helper for converting the replication table-CFs config between its String,
+ * protobuf and Map representations.
+ */
+public class TableCFsHelper {
+
+  public static TableCFsProto.TableCFs convert(
+      Map<TableName, ? extends Collection<String>> tableCfs) {
+    TableCFsProto.TableCFs.Builder tableCFsBuilder = TableCFsProto.TableCFs.newBuilder();
+    if (tableCfs != null) {
+      TableCFsProto.TableCF.Builder tableCFBuilder = TableCFsProto.TableCF.newBuilder();
+      for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
+        // Reset the per-table builder so cfs from the previous entry do not
+        // leak into this one.
+        tableCFBuilder.clear();
+        tableCFBuilder.setTableName(entry.getKey().toString());
+        if (entry.getValue() != null && !entry.getValue().isEmpty()) {
+          for (String value : entry.getValue()) {
+            tableCFBuilder.addCfs(value);
+          }
+        }
+        tableCFsBuilder.addTableCfs(tableCFBuilder.build());
+      }
+    }
+    return tableCFsBuilder.build();
+  }
+
+  /**
+   * Input String Format: table1:cf1,cf2;table2:cfA,cfB;table3
+   */
+  public static TableCFsProto.TableCFs convert(String tableCFs) {
+    return convert(ReplicationAdmin.parseTableCFsFromConfig(tableCFs));
+  }
+
+  /**
+   * Output String Format: table1:cf1,cf2;table2:cfA,cfB;table3
+   */
+  public static String convert(TableCFsProto.TableCFs tableCFs) {
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < tableCFs.getTableCfsCount(); i++) {
+      TableCFsProto.TableCF tableCF = tableCFs.getTableCfs(i);
+      sb.append(tableCF.getTableName());
+      if (tableCF.getCfsCount() > 0) {
+        sb.append(":").append(StringUtils.join(tableCF.getCfsList(), ','));
+      }
+      sb.append(";");
+    }
+    if (sb.length() > 0) {
+      sb.deleteCharAt(sb.length() - 1);
+    }
+    return sb.toString();
+  }
+
+  public static Map<TableName, List<String>> convert2Map(
+      TableCFsProto.TableCFs tableCFs) {
+    return ReplicationAdmin.parseTableCFsFromConfig(convert(tableCFs));
+  }
+
+}
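
For illustration, a minimal round trip through the helper above (the class name
TableCFsHelperExample and the table names are invented for the example; only
TableCFsHelper and the generated TableCFsProto come from this patch):

    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.replication.TableCFsHelper;
    import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;

    public class TableCFsHelperExample {
      public static void main(String[] args) {
        // The legacy string form; "table3" lists no CFs, meaning all of its
        // column families are replicated.
        String config = "table1:cf1,cf2;table2:cfA,cfB;table3";

        // String -> protobuf, the form now persisted in ZooKeeper.
        TableCFsProto.TableCFs pb = TableCFsHelper.convert(config);

        // Protobuf -> string round-trips to the same format.
        String str = TableCFsHelper.convert(pb);

        // Protobuf -> map, the form consumed by TableCfWALEntryFilter.
        Map<TableName, List<String>> map = TableCFsHelper.convert2Map(pb);

        System.out.println(str);        // the three entries, in map iteration order
        System.out.println(map.size()); // 3
      }
    }
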
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index b8b5b22..450bea7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 
 /**
  * ReplicationPeer manages enabled / disabled state for the peer.
@@ -68,6 +69,6 @@
    * Get the replicable (table, cf-list) config of this peer
    * @return the replicable (table, cf-list) config
    */
-  public Map<TableName, List<String>> getTableCFs();
+  public TableCFsProto.TableCFs getTableCFs();
 
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
index 3ac8007..e60dfd7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
@@ -24,6 +24,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -48,7 +50,7 @@ public class ReplicationPeerZKImpl implements ReplicationPeer, Abortable, Closea
   private final ReplicationPeerConfig peerConfig;
   private final String id;
   private volatile PeerState peerState;
-  private volatile Map<TableName, List<String>> tableCFs = new HashMap<TableName, List<String>>();
+  private volatile TableCFsProto.TableCFs tableCFs = null;
   private final Configuration conf;
 
   private PeerStateTracker peerStateTracker;
@@ -110,8 +112,11 @@ public class ReplicationPeerZKImpl implements ReplicationPeer, Abortable, Closea
   }
 
   private void readTableCFsZnode() {
-    String currentTableCFs = Bytes.toString(tableCFsTracker.getData(false));
-    this.tableCFs = ReplicationAdmin.parseTableCFsFromConfig(currentTableCFs);
+    try {
+      this.tableCFs = TableCFsProto.TableCFs.parseFrom(tableCFsTracker.getData(false));
+    } catch (InvalidProtocolBufferException e) {
+      LOG.error("Failed to parse the tableCFs znode data for peer " + id, e);
+    }
   }
 
   @Override
@@ -151,7 +156,7 @@
    * @return the replicable (table, cf-list) config
    */
   @Override
-  public Map<TableName, List<String>> getTableCFs() {
+  public TableCFsProto.TableCFs getTableCFs() {
     return this.tableCFs;
   }
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index 8e80e06..301ceda 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -25,6 +25,7 @@ import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 import org.apache.hadoop.hbase.util.Pair;
 
 /**
@@ -52,7 +53,8 @@ public interface ReplicationPeers {
    * @param tableCFs the table and column-family list which will be replicated for this peer, or
    *   null for all tables and column families
    */
-  void addPeer(String peerId, ReplicationPeerConfig peerConfig, String tableCFs)
+  void addPeer(String peerId, ReplicationPeerConfig peerConfig,
+      TableCFsProto.TableCFs tableCFs)
     throws ReplicationException;
 
   /**
@@ -81,21 +83,23 @@ public interface ReplicationPeers {
    * Get the table and column-family config of the peer from ZK.
    * @param peerId a short name that identifies the cluster
    */
-  public String getPeerTableCFsConfig(String peerId) throws ReplicationException;
+  public TableCFsProto.TableCFs getPeerTableCFsConfig(String peerId)
+      throws ReplicationException;
 
   /**
    * Set the table and column-family config of the peer in ZK.
    * @param peerId a short name that identifies the cluster
    * @param tableCFs the table and column-family list which will be replicated for this peer
    */
-  public void setPeerTableCFsConfig(String peerId, String tableCFs) throws ReplicationException;
+  public void setPeerTableCFsConfig(String peerId, TableCFsProto.TableCFs tableCFs)
+      throws ReplicationException;
 
   /**
    * Get the table and column-family config of the peer.
    * @param peerId a short name that identifies the cluster
    * @return the table and column-family list which will be replicated for this peer
    */
-  public Map<TableName, List<String>> getTableCFs(String peerId);
+  public TableCFsProto.TableCFs getTableCFs(String peerId);
 
   /**
    * Returns the ReplicationPeer
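
Concretely, the peer's tableCFs znode payload changes from a UTF-8 string such
as "table1:cf1,cf2" to the serialized bytes of a TableCFs message. A sketch of
that byte-level round trip, mirroring what the ZK implementation below writes
and reads (the class name and table name are invented for the example):

    import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;

    public class TableCFsBytesExample {
      public static void main(String[] args) throws Exception {
        // Built the same way TableCFsHelper.convert(Map) builds it.
        TableCFsProto.TableCFs tableCFs = TableCFsProto.TableCFs.newBuilder()
            .addTableCfs(TableCFsProto.TableCF.newBuilder()
                .setTableName("table1").addCfs("cf1").addCfs("cf2"))
            .build();

        // What ReplicationPeersZKImpl now writes to the peer's tableCFs znode.
        byte[] znodeData = tableCFs.toByteArray();

        // What getPeerTableCFsConfig() / readTableCFsZnode() parse back out.
        TableCFsProto.TableCFs parsed = TableCFsProto.TableCFs.parseFrom(znodeData);
        System.out.println(parsed.getTableCfs(0).getTableName()); // table1
      }
    }
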
"" : tableCFs; - ZKUtilOp op3 = ZKUtilOp.createAndFailSilent(getTableCFsNode(id), Bytes.toBytes(tableCFsStr)); + ZKUtilOp op3 = ZKUtilOp.createAndFailSilent(getTableCFsNode(id), tableCFs.toByteArray()); listOfOps.add(op1); listOfOps.add(op2); listOfOps.add(op3); @@ -168,13 +169,13 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re } @Override - public String getPeerTableCFsConfig(String id) throws ReplicationException { + public TableCFsProto.TableCFs getPeerTableCFsConfig(String id) throws ReplicationException { try { if (!peerExists(id)) { throw new IllegalArgumentException("peer " + id + " doesn't exist"); } try { - return Bytes.toString(ZKUtil.getData(this.zookeeper, getTableCFsNode(id))); + return TableCFsProto.TableCFs.parseFrom(ZKUtil.getData(this.zookeeper, getTableCFsNode(id))); } catch (Exception e) { throw new ReplicationException(e); } @@ -184,27 +185,27 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re } @Override - public void setPeerTableCFsConfig(String id, String tableCFsStr) throws ReplicationException { + public void setPeerTableCFsConfig(String id, TableCFsProto.TableCFs tableCFs) + throws ReplicationException { try { if (!peerExists(id)) { throw new IllegalArgumentException("Cannot set peer tableCFs because id=" + id + " does not exist."); } String tableCFsZKNode = getTableCFsNode(id); - byte[] tableCFs = Bytes.toBytes(tableCFsStr); if (ZKUtil.checkExists(this.zookeeper, tableCFsZKNode) != -1) { - ZKUtil.setData(this.zookeeper, tableCFsZKNode, tableCFs); + ZKUtil.setData(this.zookeeper, tableCFsZKNode, tableCFs.toByteArray()); } else { - ZKUtil.createAndWatch(this.zookeeper, tableCFsZKNode, tableCFs); + ZKUtil.createAndWatch(this.zookeeper, tableCFsZKNode, tableCFs.toByteArray()); } - LOG.info("Peer tableCFs with id= " + id + " is now " + tableCFsStr); + LOG.info("Peer tableCFs with id= " + id + " is now " + TableCFsHelper.convert(tableCFs)); } catch (KeeperException e) { throw new ReplicationException("Unable to change tableCFs of the peer with id=" + id, e); } } @Override - public Map> getTableCFs(String id) throws IllegalArgumentException { + public TableCFsProto.TableCFs getTableCFs(String id) throws IllegalArgumentException { ReplicationPeer replicationPeer = this.peerClusters.get(id); if (replicationPeer == null) { throw new IllegalArgumentException("Peer with id= " + id + " is not connected"); diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableCFsProto.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableCFsProto.java new file mode 100644 index 0000000..07451b2 --- /dev/null +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableCFsProto.java @@ -0,0 +1,1472 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: TableCFs.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class TableCFsProto { + private TableCFsProto() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface TableCFOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string table_name = 1; + /** + * optional string table_name = 1; + */ + boolean hasTableName(); + /** + * optional string table_name = 1; + */ + java.lang.String getTableName(); + /** + * optional string table_name = 1; + */ + com.google.protobuf.ByteString + getTableNameBytes(); + + // repeated string cfs = 2; + /** + * repeated string cfs = 2; + */ + java.util.List + getCfsList(); + /** + * repeated string cfs = 2; + */ + int getCfsCount(); + /** + * repeated string cfs = 2; + */ + java.lang.String getCfs(int index); + /** + * repeated string cfs = 2; + */ + com.google.protobuf.ByteString + getCfsBytes(int index); + } + /** + * Protobuf type {@code hbase.pb.TableCF} + */ + public static final class TableCF extends + com.google.protobuf.GeneratedMessage + implements TableCFOrBuilder { + // Use TableCF.newBuilder() to construct. + private TableCF(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableCF(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableCF defaultInstance; + public static TableCF getDefaultInstance() { + return defaultInstance; + } + + public TableCF getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableCF( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + tableName_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + cfs_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + cfs_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + cfs_ = new com.google.protobuf.UnmodifiableLazyStringList(cfs_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + 
return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.class, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableCF parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableCF(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private java.lang.Object tableName_; + /** + * optional string table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string table_name = 1; + */ + public java.lang.String getTableName() { + java.lang.Object ref = tableName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + tableName_ = s; + } + return s; + } + } + /** + * optional string table_name = 1; + */ + public com.google.protobuf.ByteString + getTableNameBytes() { + java.lang.Object ref = tableName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated string cfs = 2; + public static final int CFS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList cfs_; + /** + * repeated string cfs = 2; + */ + public java.util.List + getCfsList() { + return cfs_; + } + /** + * repeated string cfs = 2; + */ + public int getCfsCount() { + return cfs_.size(); + } + /** + * repeated string cfs = 2; + */ + public java.lang.String getCfs(int index) { + return cfs_.get(index); + } + /** + * repeated string cfs = 2; + */ + public com.google.protobuf.ByteString + getCfsBytes(int index) { + return cfs_.getByteString(index); + } + + private void initFields() { + tableName_ = ""; + cfs_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTableNameBytes()); + } + for (int i = 0; i < cfs_.size(); i++) { + output.writeBytes(2, cfs_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTableNameBytes()); + } + { + int dataSize = 0; + for (int i = 0; i < cfs_.size(); i++) { + dataSize += 
com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(cfs_.getByteString(i)); + } + size += dataSize; + size += 1 * getCfsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF other = (org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && getCfsList() + .equals(other.getCfsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (getCfsCount() > 0) { + hash = (37 * hash) + CFS_FIELD_NUMBER; + hash = (53 * hash) + getCfsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseDelimitedFrom( 
+ java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.TableCF} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.class, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + tableName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + cfs_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF build() { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return 
result; + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF result = new org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.tableName_ = tableName_; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + cfs_ = new com.google.protobuf.UnmodifiableLazyStringList( + cfs_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.cfs_ = cfs_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.getDefaultInstance()) return this; + if (other.hasTableName()) { + bitField0_ |= 0x00000001; + tableName_ = other.tableName_; + onChanged(); + } + if (!other.cfs_.isEmpty()) { + if (cfs_.isEmpty()) { + cfs_ = other.cfs_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureCfsIsMutable(); + cfs_.addAll(other.cfs_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string table_name = 1; + private java.lang.Object tableName_ = ""; + /** + * optional string table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string table_name = 1; + */ + public java.lang.String getTableName() { + java.lang.Object ref = tableName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + tableName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string table_name = 1; + */ + public com.google.protobuf.ByteString + getTableNameBytes() { + java.lang.Object ref = tableName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string table_name = 1; + */ + public Builder setTableName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + tableName_ = value; + onChanged(); + return this; + } + /** + * optional string 
table_name = 1; + */ + public Builder clearTableName() { + bitField0_ = (bitField0_ & ~0x00000001); + tableName_ = getDefaultInstance().getTableName(); + onChanged(); + return this; + } + /** + * optional string table_name = 1; + */ + public Builder setTableNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + tableName_ = value; + onChanged(); + return this; + } + + // repeated string cfs = 2; + private com.google.protobuf.LazyStringList cfs_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureCfsIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + cfs_ = new com.google.protobuf.LazyStringArrayList(cfs_); + bitField0_ |= 0x00000002; + } + } + /** + * repeated string cfs = 2; + */ + public java.util.List + getCfsList() { + return java.util.Collections.unmodifiableList(cfs_); + } + /** + * repeated string cfs = 2; + */ + public int getCfsCount() { + return cfs_.size(); + } + /** + * repeated string cfs = 2; + */ + public java.lang.String getCfs(int index) { + return cfs_.get(index); + } + /** + * repeated string cfs = 2; + */ + public com.google.protobuf.ByteString + getCfsBytes(int index) { + return cfs_.getByteString(index); + } + /** + * repeated string cfs = 2; + */ + public Builder setCfs( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureCfsIsMutable(); + cfs_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string cfs = 2; + */ + public Builder addCfs( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureCfsIsMutable(); + cfs_.add(value); + onChanged(); + return this; + } + /** + * repeated string cfs = 2; + */ + public Builder addAllCfs( + java.lang.Iterable values) { + ensureCfsIsMutable(); + super.addAll(values, cfs_); + onChanged(); + return this; + } + /** + * repeated string cfs = 2; + */ + public Builder clearCfs() { + cfs_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * repeated string cfs = 2; + */ + public Builder addCfsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureCfsIsMutable(); + cfs_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.TableCF) + } + + static { + defaultInstance = new TableCF(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.TableCF) + } + + public interface TableCFsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .hbase.pb.TableCF table_cfs = 1; + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + java.util.List + getTableCfsList(); + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF getTableCfs(int index); + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + int getTableCfsCount(); + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + java.util.List + getTableCfsOrBuilderList(); + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder getTableCfsOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.TableCFs} + */ + public static final class TableCFs extends + com.google.protobuf.GeneratedMessage + implements TableCFsOrBuilder { + // Use 
TableCFs.newBuilder() to construct. + private TableCFs(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableCFs(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableCFs defaultInstance; + public static TableCFs getDefaultInstance() { + return defaultInstance; + } + + public TableCFs getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableCFs( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + tableCfs_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + tableCfs_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + tableCfs_ = java.util.Collections.unmodifiableList(tableCfs_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.class, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableCFs parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableCFs(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .hbase.pb.TableCF table_cfs = 1; + public static final int TABLE_CFS_FIELD_NUMBER = 1; + private java.util.List tableCfs_; + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public java.util.List getTableCfsList() { + return tableCfs_; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public java.util.List + getTableCfsOrBuilderList() { 
+ return tableCfs_; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public int getTableCfsCount() { + return tableCfs_.size(); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF getTableCfs(int index) { + return tableCfs_.get(index); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder getTableCfsOrBuilder( + int index) { + return tableCfs_.get(index); + } + + private void initFields() { + tableCfs_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < tableCfs_.size(); i++) { + output.writeMessage(1, tableCfs_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < tableCfs_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableCfs_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs other = (org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs) obj; + + boolean result = true; + result = result && getTableCfsList() + .equals(other.getTableCfsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getTableCfsCount() > 0) { + hash = (37 * hash) + TABLE_CFS_FIELD_NUMBER; + hash = (53 * hash) + getTableCfsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.TableCFs} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.class, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { 
+ getTableCfsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableCfsBuilder_ == null) { + tableCfs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + tableCfsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs build() { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs result = new org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs(this); + int from_bitField0_ = bitField0_; + if (tableCfsBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + tableCfs_ = java.util.Collections.unmodifiableList(tableCfs_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.tableCfs_ = tableCfs_; + } else { + result.tableCfs_ = tableCfsBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.getDefaultInstance()) return this; + if (tableCfsBuilder_ == null) { + if (!other.tableCfs_.isEmpty()) { + if (tableCfs_.isEmpty()) { + tableCfs_ = other.tableCfs_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTableCfsIsMutable(); + tableCfs_.addAll(other.tableCfs_); + } + onChanged(); + } + } else { + if (!other.tableCfs_.isEmpty()) { + if (tableCfsBuilder_.isEmpty()) { + tableCfsBuilder_.dispose(); + tableCfsBuilder_ = null; + tableCfs_ = other.tableCfs_; + bitField0_ = (bitField0_ & ~0x00000001); + tableCfsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTableCfsFieldBuilder() : null; + } else { + tableCfsBuilder_.addAllMessages(other.tableCfs_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .hbase.pb.TableCF table_cfs = 1; + private java.util.List tableCfs_ = + java.util.Collections.emptyList(); + private void ensureTableCfsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + tableCfs_ = new java.util.ArrayList(tableCfs_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder> tableCfsBuilder_; + + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public java.util.List getTableCfsList() { + if (tableCfsBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableCfs_); + } else { + return tableCfsBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public int getTableCfsCount() { + if (tableCfsBuilder_ == null) { + return tableCfs_.size(); + } else { + return tableCfsBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF getTableCfs(int index) { + if (tableCfsBuilder_ == null) { + return tableCfs_.get(index); + } else { + return tableCfsBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder setTableCfs( + int index, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF value) { + if (tableCfsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableCfsIsMutable(); + tableCfs_.set(index, value); + onChanged(); + } else { + tableCfsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder setTableCfs( + int index, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder builderForValue) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + tableCfs_.set(index, builderForValue.build()); + onChanged(); + } else { + tableCfsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder addTableCfs(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF value) { + if (tableCfsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableCfsIsMutable(); + tableCfs_.add(value); + onChanged(); + } else { + tableCfsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public 
Builder addTableCfs( + int index, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF value) { + if (tableCfsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableCfsIsMutable(); + tableCfs_.add(index, value); + onChanged(); + } else { + tableCfsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder addTableCfs( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder builderForValue) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + tableCfs_.add(builderForValue.build()); + onChanged(); + } else { + tableCfsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder addTableCfs( + int index, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder builderForValue) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + tableCfs_.add(index, builderForValue.build()); + onChanged(); + } else { + tableCfsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder addAllTableCfs( + java.lang.Iterable values) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + super.addAll(values, tableCfs_); + onChanged(); + } else { + tableCfsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder clearTableCfs() { + if (tableCfsBuilder_ == null) { + tableCfs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + tableCfsBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder removeTableCfs(int index) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + tableCfs_.remove(index); + onChanged(); + } else { + tableCfsBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder getTableCfsBuilder( + int index) { + return getTableCfsFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder getTableCfsOrBuilder( + int index) { + if (tableCfsBuilder_ == null) { + return tableCfs_.get(index); } else { + return tableCfsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public java.util.List + getTableCfsOrBuilderList() { + if (tableCfsBuilder_ != null) { + return tableCfsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableCfs_); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder addTableCfsBuilder() { + return getTableCfsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder addTableCfsBuilder( + int index) { + return getTableCfsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + 
+    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder>
+         getTableCfsBuilderList() {
+      return getTableCfsFieldBuilder().getBuilderList();
+    }
+    private com.google.protobuf.RepeatedFieldBuilder<
+        org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder>
+        getTableCfsFieldBuilder() {
+      if (tableCfsBuilder_ == null) {
+        tableCfsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+            org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder>(
+                tableCfs_,
+                ((bitField0_ & 0x00000001) == 0x00000001),
+                getParentForChildren(),
+                isClean());
+        tableCfs_ = null;
+      }
+      return tableCfsBuilder_;
+    }
+
+    // @@protoc_insertion_point(builder_scope:hbase.pb.TableCFs)
+  }
+
+  static {
+    defaultInstance = new TableCFs(true);
+    defaultInstance.initFields();
+  }
+
+  // @@protoc_insertion_point(class_scope:hbase.pb.TableCFs)
+  }
+
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_TableCF_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_TableCF_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_TableCFs_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_TableCFs_fieldAccessorTable;
+
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\016TableCFs.proto\022\010hbase.pb\"*\n\007TableCF\022\022\n" +
+      "\ntable_name\030\001 \001(\t\022\013\n\003cfs\030\002 \003(\t\"0\n\010TableC" +
+      "Fs\022$\n\ttable_cfs\030\001 \003(\0132\021.hbase.pb.TableCF" +
+      "B@\n*org.apache.hadoop.hbase.protobuf.gen" +
+      "eratedB\rTableCFsProtoH\001\240\001\001"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+        new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+          public com.google.protobuf.ExtensionRegistry assignDescriptors(
+              com.google.protobuf.Descriptors.FileDescriptor root) {
+            descriptor = root;
+            internal_static_hbase_pb_TableCF_descriptor =
+              getDescriptor().getMessageTypes().get(0);
+            internal_static_hbase_pb_TableCF_fieldAccessorTable = new
+              com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+                internal_static_hbase_pb_TableCF_descriptor,
+                new java.lang.String[] { "TableName", "Cfs", });
+            internal_static_hbase_pb_TableCFs_descriptor =
+              getDescriptor().getMessageTypes().get(1);
+            internal_static_hbase_pb_TableCFs_fieldAccessorTable = new
+              com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+                internal_static_hbase_pb_TableCFs_descriptor,
+                new java.lang.String[] { "TableCfs", });
+            return null;
+          }
+        };
+    com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new com.google.protobuf.Descriptors.FileDescriptor[] {
+        }, assigner);
+  }
+
+  // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/hbase-protocol/src/main/protobuf/TableCFs.proto b/hbase-protocol/src/main/protobuf/TableCFs.proto
new file mode 100644
index 0000000..876de43
--- /dev/null
+++ b/hbase-protocol/src/main/protobuf/TableCFs.proto
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Replication table-cfs (per-table column family) protos
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "TableCFsProto";
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+message TableCF {
+  optional string table_name = 1;
+  repeated string cfs = 2;
+}
+
+message TableCFs {
+  repeated TableCF table_cfs = 1;
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java
index 0cbbcef..46bcf6f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java
@@ -27,6 +27,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.replication.TableCFsHelper;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
@@ -46,7 +47,7 @@ public class TableCfWALEntryFilter implements WALEntryFilter {
     Map<TableName, List<String>> tableCFs = null;
     try {
-      tableCFs = this.peer.getTableCFs();
+      tableCFs = TableCFsHelper.convert2Map(this.peer.getTableCFs());
     } catch (IllegalArgumentException e) {
       LOG.error("should not happen: can't get tableCFs for peer " + peer.getId() +
         ", degenerate as if it's not configured by keeping tableCFs==null");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
index 22c421d..8a022b2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.replication.TableCFsHelper;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.TableName;
@@ -213,7 +214,7 @@ public class TestReplicationWALEntryFilters {
     // empty map
     userEntry = createEntry(a, b, c);
     Map<TableName, List<String>> tableCfs = new HashMap<TableName, List<String>>();
-    when(peer.getTableCFs()).thenReturn(tableCfs);
+    when(peer.getTableCFs()).thenReturn(TableCFsHelper.convert(tableCfs));
     filter = new TableCfWALEntryFilter(peer);
     assertEquals(null, filter.filter(userEntry));
@@ -221,7 +222,7 @@ public class TestReplicationWALEntryFilters {
     userEntry = createEntry(a, b, c);
     tableCfs = new HashMap<TableName, List<String>>();
     tableCfs.put(TableName.valueOf("bar"), null);
-    when(peer.getTableCFs()).thenReturn(tableCfs);
+    when(peer.getTableCFs()).thenReturn(TableCFsHelper.convert(tableCfs));
     filter = new TableCfWALEntryFilter(peer);
     assertEquals(null, filter.filter(userEntry));
@@ -229,7 +230,7 @@ public class TestReplicationWALEntryFilters {
     userEntry = createEntry(a, b, c);
     tableCfs = new HashMap<TableName, List<String>>();
     tableCfs.put(TableName.valueOf("foo"), Lists.newArrayList("a"));
-    when(peer.getTableCFs()).thenReturn(tableCfs);
+    when(peer.getTableCFs()).thenReturn(TableCFsHelper.convert(tableCfs));
     filter = new TableCfWALEntryFilter(peer);
     assertEquals(createEntry(a), filter.filter(userEntry));
@@ -237,7 +238,7 @@ public class TestReplicationWALEntryFilters {
     userEntry = createEntry(a, b, c, d);
     tableCfs = new HashMap<TableName, List<String>>();
     tableCfs.put(TableName.valueOf("foo"), Lists.newArrayList("a", "c"));
-    when(peer.getTableCFs()).thenReturn(tableCfs);
+    when(peer.getTableCFs()).thenReturn(TableCFsHelper.convert(tableCfs));
     filter = new TableCfWALEntryFilter(peer);
     assertEquals(createEntry(a,c), filter.filter(userEntry));
   }
-- 
1.9.3 (Apple Git-50)
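
Note on the conversion path exercised by the hunks above: TableCFsHelper.convert(Map)
and TableCFsHelper.convert2Map(...) are referenced here but not defined in this
section. As a rough sketch only -- it assumes the standard protoc 2.x accessor
names (setTableName, addCfs, getTableName, getCfsCount, getCfsList, getTableCfsList)
generated for the fields declared in TableCFs.proto, and TableCFsSketch/pack/unpack
are hypothetical names, not the helper's actual API -- the round trip between the
map shape and the new PB object could look like this:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;

    public class TableCFsSketch {
      /** Pack a table -> column-family map into the TableCFs PB message. */
      public static TableCFsProto.TableCFs pack(Map<TableName, List<String>> tableCfs) {
        TableCFsProto.TableCFs.Builder builder = TableCFsProto.TableCFs.newBuilder();
        for (Map.Entry<TableName, List<String>> entry : tableCfs.entrySet()) {
          TableCFsProto.TableCF.Builder tableCF = TableCFsProto.TableCF.newBuilder();
          tableCF.setTableName(entry.getKey().getNameAsString());
          // A null cf list (as in the "bar" test case above) means "all column
          // families of this table"; it maps to an empty repeated cfs field.
          if (entry.getValue() != null) {
            for (String family : entry.getValue()) {
              tableCF.addCfs(family);
            }
          }
          builder.addTableCfs(tableCF);  // builder overload added by this patch
        }
        return builder.build();
      }

      /** Unpack back into the map shape TableCfWALEntryFilter consumes. */
      public static Map<TableName, List<String>> unpack(TableCFsProto.TableCFs proto) {
        Map<TableName, List<String>> result = new HashMap<TableName, List<String>>();
        for (TableCFsProto.TableCF tableCF : proto.getTableCfsList()) {
          List<String> families = tableCF.getCfsCount() == 0
              ? null : new ArrayList<String>(tableCF.getCfsList());
          result.put(TableName.valueOf(tableCF.getTableName()), families);
        }
        return result;
      }
    }

The structured PB object makes the per-table cf lists explicit, which is the
point of this change: the tests above build the same maps as before and only
wrap them through the converter before handing them to the mocked peer.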