From 0fecb2cbf027719dcaca10114ad93acce6df31e5 Mon Sep 17 00:00:00 2001
From: chenheng
Date: Mon, 2 Nov 2015 17:42:31 +0800
Subject: [PATCH] HBASE-11393 Replication TableCfs should be a PB object rather than a string

---
 .../hbase/client/replication/ReplicationAdmin.java |  123 +-
 .../hbase/client/replication/TableCFsHelper.java   |  159 +++
 .../hadoop/hbase/replication/ReplicationPeer.java  |    7 +-
 .../hbase/replication/ReplicationPeerZKImpl.java   |   22 +-
 .../hadoop/hbase/replication/ReplicationPeers.java |   18 +-
 .../hbase/replication/ReplicationPeersZKImpl.java  |   32 +-
 .../hbase/protobuf/generated/TableCFsProto.java    | 1472 ++++++++++++++++++++
 hbase-protocol/src/main/protobuf/TableCFs.proto    |   34 +
 .../hbase/replication/TableCfWALEntryFilter.java   |   41 +-
 .../client/replication/TestReplicationAdmin.java   |   31 -
 .../replication/TestPerTableCFReplication.java     |  103 +-
 .../TestReplicationWALEntryFilters.java            |   13 +-
 ...stRegionReplicaReplicationEndpointNoMaster.java |    3 +-
 13 files changed, 1883 insertions(+), 175 deletions(-)
 create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFsHelper.java
 create mode 100644 hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableCFsProto.java
 create mode 100644 hbase-protocol/src/main/protobuf/TableCFs.proto

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 8bd1267..1347185 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
@@ -186,7 +187,7 @@ public class ReplicationAdmin implements Closeable {
   public void addPeer(String id, String clusterKey, String tableCFs)
     throws ReplicationException {
     this.replicationPeers.addPeer(id,
-      new ReplicationPeerConfig().setClusterKey(clusterKey), tableCFs);
+      new ReplicationPeerConfig().setClusterKey(clusterKey), TableCFsHelper.convert(tableCFs));
   }
 
   /**
@@ -200,7 +201,7 @@
    */
   public void addPeer(String id, ReplicationPeerConfig peerConfig,
       Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException {
-    this.replicationPeers.addPeer(id, peerConfig, getTableCfsStr(tableCfs));
+    this.replicationPeers.addPeer(id, peerConfig, TableCFsHelper.convert(tableCfs));
   }
 
   public static Map<TableName, List<String>> parseTableCFsFromConfig(String tableCFsConfig) {
@@ -252,27 +253,6 @@
     return tableCFsMap;
   }
 
-  @VisibleForTesting
-  static String getTableCfsStr(Map<TableName, ? extends Collection<String>> tableCfs) {
-    String tableCfsStr = null;
-    if (tableCfs != null) {
-      // Format: table1:cf1,cf2;table2:cfA,cfB;table3
-      StringBuilder builder = new StringBuilder();
-      for (Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
-        if (builder.length() > 0) {
-          builder.append(";");
-        }
-        builder.append(entry.getKey());
-        if (entry.getValue() != null && !entry.getValue().isEmpty()) {
-          builder.append(":");
-          builder.append(StringUtils.join(entry.getValue(), ","));
-        }
-      }
-      tableCfsStr = builder.toString();
-    }
-    return tableCfsStr;
-  }
-
   /**
    * Removes a peer cluster and stops the replication to it.
    * @param id a short name that identifies the cluster
    */
@@ -334,7 +314,7 @@
    * @param id a short name that identifies the cluster
    */
   public String getPeerTableCFs(String id) throws ReplicationException {
-    return this.replicationPeers.getPeerTableCFsConfig(id);
+    return TableCFsHelper.convert(this.replicationPeers.getPeerTableCFsConfig(id));
   }
 
   /**
@@ -344,7 +324,7 @@
    */
   @Deprecated
   public void setPeerTableCFs(String id, String tableCFs) throws ReplicationException {
-    this.replicationPeers.setPeerTableCFsConfig(id, tableCFs);
+    this.replicationPeers.setPeerTableCFsConfig(id, TableCFsHelper.convert(tableCFs));
   }
 
   /**
@@ -368,33 +348,37 @@
     if (tableCfs == null) {
       throw new ReplicationException("tableCfs is null");
     }
-    Map<TableName, List<String>> preTableCfs = parseTableCFsFromConfig(getPeerTableCFs(id));
-    if (preTableCfs == null) {
-      setPeerTableCFs(id, tableCfs);
+    TableCFsProto.TableCFs tableCFs = this.replicationPeers.getPeerTableCFsConfig(id);
+    if (tableCFs == null) {
+      this.replicationPeers.setPeerTableCFsConfig(id, TableCFsHelper.convert(tableCfs));
       return;
     }
-
+    TableCFsProto.TableCFs.Builder builder = tableCFs.toBuilder();
     for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
       TableName table = entry.getKey();
       Collection<String> appendCfs = entry.getValue();
-      if (preTableCfs.containsKey(table)) {
-        List<String> cfs = preTableCfs.get(table);
-        if (cfs == null || appendCfs == null) {
-          preTableCfs.put(table, null);
-        } else {
-          Set<String> cfSet = new HashSet<String>(cfs);
-          cfSet.addAll(appendCfs);
-          preTableCfs.put(table, Lists.newArrayList(cfSet));
+      TableCFsProto.TableCF tableCF = TableCFsHelper.getTableCF(tableCFs, table.toString());
+      if (tableCF != null) {
+        if (appendCfs != null) {
+          TableCFsProto.TableCF.Builder tableCFBuilder = tableCF.toBuilder();
+          for (String cf : appendCfs) {
+            if (!tableCF.getCfsList().contains(cf)) {
+              tableCFBuilder.addCfs(cf);
+            }
+          }
+          // write the merged entry back; without this the appended cfs are lost
+          builder.setTableCfs(TableCFsHelper.getTableCFIndex(tableCFs, table.toString()),
+              tableCFBuilder.build());
         }
       } else {
-        if (appendCfs == null || appendCfs.isEmpty()) {
-          preTableCfs.put(table, null);
-        } else {
-          preTableCfs.put(table, Lists.newArrayList(appendCfs));
+        TableCFsProto.TableCF.Builder tableCFBuilder = TableCFsProto.TableCF.newBuilder();
+        tableCFBuilder.setTableName(table.toString());
+        if (appendCfs != null && !appendCfs.isEmpty()) {
+          for (String cf : appendCfs) {
+            tableCFBuilder.addCfs(cf);
+          }
         }
+        builder.addTableCfs(tableCFBuilder.build());
       }
     }
-    setPeerTableCFs(id, preTableCfs);
+    this.replicationPeers.setPeerTableCFsConfig(id, builder.build());
   }
 
   /**
@@ -410,46 +394,53 @@
   /**
    * Remove some table-cfs from config of the specified peer
    * @param id a short name that identifies the cluster
-   * @param tableCfs A map from tableName to column family names
+   * @param cfs A map from tableName to column family names
    * @throws ReplicationException
    */
-  public void removePeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
+  public void removePeerTableCFs(String id, Map<TableName, ? extends Collection<String>> cfs)
       throws ReplicationException {
-    if (tableCfs == null) {
-      throw new ReplicationException("tableCfs is null");
+    if (cfs == null) {
+      throw new ReplicationException("cfs is null");
     }
-
-    Map<TableName, List<String>> preTableCfs = parseTableCFsFromConfig(getPeerTableCFs(id));
-    if (preTableCfs == null) {
+    TableCFsProto.TableCFs tableCFs =
+        this.replicationPeers.getPeerTableCFsConfig(id);
+    if (tableCFs == null) {
       throw new ReplicationException("Table-Cfs for peer " + id + " is null");
     }
-    for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
+    TableCFsProto.TableCFs.Builder tableCFsBuilder = tableCFs.toBuilder();
+    for (Map.Entry<TableName, ? extends Collection<String>> entry : cfs.entrySet()) {
       TableName table = entry.getKey();
       Collection<String> removeCfs = entry.getValue();
-      if (preTableCfs.containsKey(table)) {
-        List<String> cfs = preTableCfs.get(table);
-        if (cfs == null && removeCfs == null) {
-          preTableCfs.remove(table);
-        } else if (cfs != null && removeCfs != null) {
-          Set<String> cfSet = new HashSet<String>(cfs);
-          cfSet.removeAll(removeCfs);
-          if (cfSet.isEmpty()) {
-            preTableCfs.remove(table);
-          } else {
-            preTableCfs.put(table, Lists.newArrayList(cfSet));
+      TableCFsProto.TableCF tableCF = TableCFsHelper.getTableCF(tableCFs, table.toString());
+      if (tableCF != null) {
+        // remove the whole table
+        if (tableCF.getCfsCount() == 0 && removeCfs == null) {
+          tableCFsBuilder.removeTableCfs(
+              TableCFsHelper.getTableCFIndex(tableCFs, table.toString()));
+        } else if (tableCF.getCfsCount() > 0 && removeCfs != null) {
+          TableCFsProto.TableCF.Builder tableCFBuilder = tableCF.toBuilder();
+          Set<String> sets = new HashSet<String>(tableCF.getCfsList());
+          sets.removeAll(removeCfs);
+          tableCFBuilder.clearCfs();
+          for (String cf : sets) {
+            tableCFBuilder.addCfs(cf);
+          }
+          if (tableCFBuilder.getCfsCount() == 0) {
+            tableCFsBuilder.removeTableCfs(
+                TableCFsHelper.getTableCFIndex(tableCFs, table.toString()));
+          } else {
+            // write the remaining cfs back; without this the removal is lost
+            tableCFsBuilder.setTableCfs(
+                TableCFsHelper.getTableCFIndex(tableCFs, table.toString()),
+                tableCFBuilder.build());
           }
-        } else if (cfs == null && removeCfs != null) {
+        } else if (tableCF.getCfsCount() == 0 && removeCfs != null) {
           throw new ReplicationException("Cannot remove cf of table: " + table
               + " which doesn't specify cfs from table-cfs config in peer: " + id);
-        } else if (cfs != null && removeCfs == null) {
+        } else if (tableCF.getCfsCount() > 0 && removeCfs == null) {
           throw new ReplicationException("Cannot remove table: " + table
               + " which has specified cfs from table-cfs config in peer: " + id);
         }
       } else {
-        throw new ReplicationException("No table: " + table + " in table-cfs config of peer: " + id);
+        throw new ReplicationException("No table: "
            + table + " in table-cfs config of peer: " + id);
       }
     }
-    setPeerTableCFs(id, preTableCfs);
+    this.replicationPeers.setPeerTableCFsConfig(id, tableCFsBuilder.build());
   }
 
   /**
@@ -462,7 +453,7 @@
    */
   public void setPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
       throws ReplicationException {
-    this.replicationPeers.setPeerTableCFsConfig(id, getTableCfsStr(tableCfs));
+    this.replicationPeers.setPeerTableCFsConfig(id, TableCFsHelper.convert(tableCfs));
   }
 
   /**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFsHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFsHelper.java
new file mode 100644
index 0000000..1786d8a
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFsHelper.java
@@ -0,0 +1,159 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client.replication;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Helper that converts the per-peer table-CFs config between its legacy
+ * String form and the TableCFs protobuf introduced by HBASE-11393.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TableCFsHelper {
+
+  private TableCFsHelper() {}
+
+  public static TableCFsProto.TableCFs convert(
+      Map<TableName, ? extends Collection<String>> tableCfs) {
+    if (tableCfs == null) {
+      return null;
+    }
+    TableCFsProto.TableCFs.Builder tableCFsBuilder = TableCFsProto.TableCFs.newBuilder();
+    TableCFsProto.TableCF.Builder tableCFBuilder = TableCFsProto.TableCF.newBuilder();
+    for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
+      tableCFBuilder.clear();
+      tableCFBuilder.setTableName(entry.getKey().toString());
+      if (entry.getValue() != null && !entry.getValue().isEmpty()) {
+        for (String value : entry.getValue()) {
+          tableCFBuilder.addCfs(value);
+        }
+      }
+      tableCFsBuilder.addTableCfs(tableCFBuilder.build());
+    }
+    return tableCFsBuilder.build();
+  }
+
+  /**
+   * Input String Format: table1:cf1,cf2;table2:cfA,cfB;table3
+   */
+  public static TableCFsProto.TableCFs convert(String tableCFsConfig) {
+    if (tableCFsConfig == null || tableCFsConfig.trim().length() == 0) {
+      return null;
+    }
+
+    TableCFsProto.TableCFs.Builder tableCFsBuilder = TableCFsProto.TableCFs.newBuilder();
+    TableCFsProto.TableCF.Builder tableCFBuilder = TableCFsProto.TableCF.newBuilder();
+    // Legacy String parser, kept so existing clients can still pass the old
+    // format while the stored representation becomes a PB object (HBASE-11393).
+    // parse out (table, cf-list) pairs from tableCFsConfig
+    // format: "table1:cf1,cf2;table2:cfA,cfB"
+    String[] tables = tableCFsConfig.split(";");
+    for (String tab : tables) {
+      // 1 ignore empty table config
+      tab = tab.trim();
+      if (tab.length() == 0) {
+        continue;
+      }
+      // 2 split to "table" and "cf1,cf2"
+      //   for each table: "table:cf1,cf2" or "table"
+      String[] pair = tab.split(":");
+      String tabName = pair[0].trim();
+      if (pair.length > 2 || tabName.length() == 0) {
+        continue;
+      }
+
+      tableCFBuilder.clear();
+      tableCFBuilder.setTableName(tabName);
+      // 3 parse "cf1,cf2" part to List<String>
+      if (pair.length == 2) {
+        String[] cfsList = pair[1].split(",");
+        for (String cf : cfsList) {
+          String cfName = cf.trim();
+          if (cfName.length() > 0) {
+            tableCFBuilder.addCfs(cfName);
+          }
+        }
+      }
+      tableCFsBuilder.addTableCfs(tableCFBuilder.build());
+    }
+    return tableCFsBuilder.build();
+  }
+
+  /**
+   * Output String Format: table1:cf1,cf2;table2:cfA,cfB;table3
+   */
+  public static String convert(TableCFsProto.TableCFs tableCFs) {
+    if (tableCFs == null) {
+      return null;
+    }
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < tableCFs.getTableCfsCount(); i++) {
+      TableCFsProto.TableCF tableCF = tableCFs.getTableCfs(i);
+      sb.append(tableCF.getTableName());
+      if (tableCF.getCfsCount() > 0) {
+        sb.append(":");
+        for (int j = 0; j < tableCF.getCfsCount(); j++) {
+          if (j > 0) {
+            sb.append(",");
+          }
+          sb.append(tableCF.getCfs(j));
+        }
+      }
+      sb.append(";");
+    }
+    if (sb.length() > 0) {
+      sb.deleteCharAt(sb.length() - 1);
+    }
+    return sb.toString();
+  }
+
+  public static Map<TableName, List<String>> convert2Map(
+      TableCFsProto.TableCFs tableCFs) {
+    if (tableCFs == null || tableCFs.getTableCfsCount() == 0) {
+      return null;
+    }
+    return ReplicationAdmin.parseTableCFsFromConfig(convert(tableCFs));
+  }
+
+  public static TableCFsProto.TableCF getTableCF(TableCFsProto.TableCFs tableCFs,
+      String table) {
+    for (int i = 0; i < tableCFs.getTableCfsCount(); i++) {
+      TableCFsProto.TableCF tableCF = tableCFs.getTableCfs(i);
+      if (tableCF.getTableName().equals(table)) {
+        return tableCF;
+      }
+    }
+    return null;
+  }
+
+  public static int getTableCFIndex(TableCFsProto.TableCFs tableCFs,
+      String table) {
+    for (int i = 0; i < tableCFs.getTableCfsCount(); i++) {
+      TableCFsProto.TableCF tableCF = tableCFs.getTableCfs(i);
+      if (tableCF.getTableName().equals(table)) {
+        return i;
+      }
+    }
+    return -1;
+  }
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index b8b5b22..bb5059c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -17,13 +17,10 @@
  */
 package org.apache.hadoop.hbase.replication;
 
-import java.util.List;
-import java.util.Map;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 
 /**
  * ReplicationPeer manages enabled / disabled state for the peer.
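For reference, the round trip the new helper provides, as an illustrative sketch rather than part of the patch (TableName.valueOf is existing HBase API; the cf semantics follow parseTableCFsFromConfig, where an absent cf list means "replicate all column families of that table"):

    TableCFsProto.TableCFs pb =
        TableCFsHelper.convert("table1:cf1,cf2;table2:cfA,cfB;table3");
    String legacy = TableCFsHelper.convert(pb);   // "table1:cf1,cf2;table2:cfA,cfB;table3"
    Map<TableName, List<String>> map = TableCFsHelper.convert2Map(pb);
    map.get(TableName.valueOf("table1"));         // [cf1, cf2]
    map.get(TableName.valueOf("table3"));         // null, i.e. all column families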
@@ -68,6 +65,6 @@
    * Get replicable (table, cf-list) map of this peer
    * @return the replicable (table, cf-list) map
    */
-  public Map<TableName, List<String>> getTableCFs();
+  public TableCFsProto.TableCFs getTableCFs();
 
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
index 3ac8007..f4b4c71 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
@@ -20,21 +20,17 @@ package org.apache.hadoop.hbase.replication;
 
 import java.io.Closeable;
 import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
 
+import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -48,7 +44,7 @@ public class ReplicationPeerZKImpl implements ReplicationPeer, Abortable, Closea
   private final ReplicationPeerConfig peerConfig;
   private final String id;
   private volatile PeerState peerState;
-  private volatile Map<TableName, List<String>> tableCFs = new HashMap<TableName, List<String>>();
+  private volatile TableCFsProto.TableCFs tableCFs = null;
   private final Configuration conf;
 
   private PeerStateTracker peerStateTracker;
@@ -110,8 +106,14 @@ public class ReplicationPeerZKImpl implements ReplicationPeer, Abortable, Closea
   }
 
   private void readTableCFsZnode() {
-    String currentTableCFs = Bytes.toString(tableCFsTracker.getData(false));
-    this.tableCFs = ReplicationAdmin.parseTableCFsFromConfig(currentTableCFs);
+    try {
+      byte[] bytes = tableCFsTracker.getData(false);
+      if (bytes != null) {
+        this.tableCFs = TableCFsProto.TableCFs.parseFrom(bytes);
+      }
+    } catch (InvalidProtocolBufferException e) {
+      LOG.error("Failed to parse tableCFs znode data for peer " + id, e);
+    }
   }
 
   @Override
@@ -151,7 +153,7 @@ public class ReplicationPeerZKImpl implements ReplicationPeer, Abortable, Closea
    * @return the replicable (table, cf-list) map
    */
   @Override
-  public Map<TableName, List<String>> getTableCFs() {
+  public TableCFsProto.TableCFs getTableCFs() {
     return this.tableCFs;
   }
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index 8e80e06..aef5124 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -23,8 +23,8 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 import org.apache.hadoop.hbase.util.Pair;
 
 /**
@@ -52,7 +52,8 @@
    * @param tableCFs the table and column-family list which will be replicated for this peer or null
    *          for all table and column families
    */
-  void addPeer(String peerId, ReplicationPeerConfig peerConfig, String tableCFs)
+  void addPeer(String peerId, ReplicationPeerConfig peerConfig,
+      TableCFsProto.TableCFs tableCFs)
       throws ReplicationException;
 
   /**
@@ -81,21 +82,16 @@
    * Get the table and column-family list of the peer from ZK.
    * @param peerId a short name that identifies the cluster
    */
-  public String getPeerTableCFsConfig(String peerId) throws ReplicationException;
+  public TableCFsProto.TableCFs getPeerTableCFsConfig(String peerId)
+      throws ReplicationException;
 
   /**
    * Set the table and column-family list of the peer to ZK.
    * @param peerId a short name that identifies the cluster
    * @param tableCFs the table and column-family list which will be replicated for this peer
    */
-  public void setPeerTableCFsConfig(String peerId, String tableCFs) throws ReplicationException;
-
-  /**
-   * Get the table and column-family-list map of the peer.
-   * @param peerId a short name that identifies the cluster
-   * @return the table and column-family list which will be replicated for this peer
-   */
-  public Map<TableName, List<String>> getTableCFs(String peerId);
+  public void setPeerTableCFsConfig(String peerId, TableCFsProto.TableCFs tableCFs)
+      throws ReplicationException;
 
   /**
    * Returns the ReplicationPeer
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 1884469..235f754 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -33,11 +33,12 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.CompoundConfiguration;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.replication.TableCFsHelper;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -106,7 +107,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
   }
 
   @Override
-  public void addPeer(String id, ReplicationPeerConfig peerConfig, String tableCFs)
+  public void addPeer(String id, ReplicationPeerConfig peerConfig, TableCFsProto.TableCFs tableCFs)
       throws ReplicationException {
     try {
       if (peerExists(id)) {
@@ -129,8 +130,8 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
       // peer-state znode. This happens while adding a peer
       // The peer state data is set as "ENABLED" by default.
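      // For reference, the znode layout once these ops are applied (the child
      // names come from getPeerStateNode()/getTableCFsNode() and are
      // configurable, so this is a sketch of the defaults, not a guarantee):
      //   .../replication/peers/<id>             -> ReplicationPeerConfig (PB)
      //   .../replication/peers/<id>/peer-state  -> ENABLED | DISABLED
      //   .../replication/peers/<id>/tableCFs    -> TableCFs (PB, was a plain string)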
       ZKUtilOp op2 = ZKUtilOp.createAndFailSilent(getPeerStateNode(id), ENABLED_ZNODE_BYTES);
-      String tableCFsStr = (tableCFs == null) ? "" : tableCFs;
-      ZKUtilOp op3 = ZKUtilOp.createAndFailSilent(getTableCFsNode(id), Bytes.toBytes(tableCFsStr));
+      byte[] bytes = (tableCFs == null) ? Bytes.toBytes("") : tableCFs.toByteArray();
+      ZKUtilOp op3 = ZKUtilOp.createAndFailSilent(getTableCFsNode(id), bytes);
       listOfOps.add(op1);
       listOfOps.add(op2);
       listOfOps.add(op3);
@@ -168,13 +169,13 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
   }
 
   @Override
-  public String getPeerTableCFsConfig(String id) throws ReplicationException {
+  public TableCFsProto.TableCFs getPeerTableCFsConfig(String id) throws ReplicationException {
     try {
       if (!peerExists(id)) {
         throw new IllegalArgumentException("peer " + id + " doesn't exist");
       }
       try {
-        return Bytes.toString(ZKUtil.getData(this.zookeeper, getTableCFsNode(id)));
+        return TableCFsProto.TableCFs.parseFrom(
+            ZKUtil.getData(this.zookeeper, getTableCFsNode(id)));
       } catch (Exception e) {
         throw new ReplicationException(e);
       }
@@ -184,35 +185,26 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
   }
 
   @Override
-  public void setPeerTableCFsConfig(String id, String tableCFsStr) throws ReplicationException {
+  public void setPeerTableCFsConfig(String id, TableCFsProto.TableCFs tableCFs)
+      throws ReplicationException {
     try {
       if (!peerExists(id)) {
         throw new IllegalArgumentException("Cannot set peer tableCFs because id=" + id
             + " does not exist.");
       }
       String tableCFsZKNode = getTableCFsNode(id);
-      byte[] tableCFs = Bytes.toBytes(tableCFsStr);
+      // guard against a null config the same way addPeer() does
+      byte[] bytes = (tableCFs == null) ? Bytes.toBytes("") : tableCFs.toByteArray();
       if (ZKUtil.checkExists(this.zookeeper, tableCFsZKNode) != -1) {
-        ZKUtil.setData(this.zookeeper, tableCFsZKNode, tableCFs);
+        ZKUtil.setData(this.zookeeper, tableCFsZKNode, bytes);
       } else {
-        ZKUtil.createAndWatch(this.zookeeper, tableCFsZKNode, tableCFs);
+        ZKUtil.createAndWatch(this.zookeeper, tableCFsZKNode, bytes);
       }
-      LOG.info("Peer tableCFs with id= " + id + " is now " + tableCFsStr);
+      LOG.info("Peer tableCFs with id=" + id + " is now " + TableCFsHelper.convert(tableCFs));
     } catch (KeeperException e) {
       throw new ReplicationException("Unable to change tableCFs of the peer with id=" + id, e);
     }
   }
 
   @Override
-  public Map<TableName, List<String>> getTableCFs(String id) throws IllegalArgumentException {
-    ReplicationPeer replicationPeer = this.peerClusters.get(id);
-    if (replicationPeer == null) {
-      throw new IllegalArgumentException("Peer with id= " + id + " is not connected");
-    }
-    return replicationPeer.getTableCFs();
-  }
-
-  @Override
   public boolean getStatusOfPeer(String id) {
     ReplicationPeer replicationPeer = this.peerClusters.get(id);
     if (replicationPeer == null) {
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableCFsProto.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableCFsProto.java
new file mode 100644
index 0000000..07451b2
--- /dev/null
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableCFsProto.java
@@ -0,0 +1,1472 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
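+// The hand-written TableCFs.proto (34 lines per the diffstat, not reached in
+// this excerpt) can be read back from the generated code below; it is,
+// approximately:
+//
+//   package hbase.pb;
+//   option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+//   option java_outer_classname = "TableCFsProto";
+//
+//   message TableCF {
+//     optional string table_name = 1;
+//     repeated string cfs = 2;
+//   }
+//
+//   message TableCFs {
+//     repeated TableCF table_cfs = 1;
+//   }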
+// source: TableCFs.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class TableCFsProto { + private TableCFsProto() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface TableCFOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional string table_name = 1; + /** + * optional string table_name = 1; + */ + boolean hasTableName(); + /** + * optional string table_name = 1; + */ + java.lang.String getTableName(); + /** + * optional string table_name = 1; + */ + com.google.protobuf.ByteString + getTableNameBytes(); + + // repeated string cfs = 2; + /** + * repeated string cfs = 2; + */ + java.util.List + getCfsList(); + /** + * repeated string cfs = 2; + */ + int getCfsCount(); + /** + * repeated string cfs = 2; + */ + java.lang.String getCfs(int index); + /** + * repeated string cfs = 2; + */ + com.google.protobuf.ByteString + getCfsBytes(int index); + } + /** + * Protobuf type {@code hbase.pb.TableCF} + */ + public static final class TableCF extends + com.google.protobuf.GeneratedMessage + implements TableCFOrBuilder { + // Use TableCF.newBuilder() to construct. + private TableCF(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableCF(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableCF defaultInstance; + public static TableCF getDefaultInstance() { + return defaultInstance; + } + + public TableCF getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableCF( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + tableName_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + cfs_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + cfs_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + cfs_ = new com.google.protobuf.UnmodifiableLazyStringList(cfs_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + 
return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.class, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableCF parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableCF(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional string table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private java.lang.Object tableName_; + /** + * optional string table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string table_name = 1; + */ + public java.lang.String getTableName() { + java.lang.Object ref = tableName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + tableName_ = s; + } + return s; + } + } + /** + * optional string table_name = 1; + */ + public com.google.protobuf.ByteString + getTableNameBytes() { + java.lang.Object ref = tableName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated string cfs = 2; + public static final int CFS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList cfs_; + /** + * repeated string cfs = 2; + */ + public java.util.List + getCfsList() { + return cfs_; + } + /** + * repeated string cfs = 2; + */ + public int getCfsCount() { + return cfs_.size(); + } + /** + * repeated string cfs = 2; + */ + public java.lang.String getCfs(int index) { + return cfs_.get(index); + } + /** + * repeated string cfs = 2; + */ + public com.google.protobuf.ByteString + getCfsBytes(int index) { + return cfs_.getByteString(index); + } + + private void initFields() { + tableName_ = ""; + cfs_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTableNameBytes()); + } + for (int i = 0; i < cfs_.size(); i++) { + output.writeBytes(2, cfs_.getByteString(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTableNameBytes()); + } + { + int dataSize = 0; + for (int i = 0; i < cfs_.size(); i++) { + dataSize += 
com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(cfs_.getByteString(i)); + } + size += dataSize; + size += 1 * getCfsList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF other = (org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && getCfsList() + .equals(other.getCfsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (getCfsCount() > 0) { + hash = (37 * hash) + CFS_FIELD_NUMBER; + hash = (53 * hash) + getCfsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseDelimitedFrom( 
+ java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.TableCF} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.class, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + tableName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + cfs_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF build() { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return 
result; + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF result = new org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.tableName_ = tableName_; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + cfs_ = new com.google.protobuf.UnmodifiableLazyStringList( + cfs_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.cfs_ = cfs_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.getDefaultInstance()) return this; + if (other.hasTableName()) { + bitField0_ |= 0x00000001; + tableName_ = other.tableName_; + onChanged(); + } + if (!other.cfs_.isEmpty()) { + if (cfs_.isEmpty()) { + cfs_ = other.cfs_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureCfsIsMutable(); + cfs_.addAll(other.cfs_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional string table_name = 1; + private java.lang.Object tableName_ = ""; + /** + * optional string table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional string table_name = 1; + */ + public java.lang.String getTableName() { + java.lang.Object ref = tableName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + tableName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string table_name = 1; + */ + public com.google.protobuf.ByteString + getTableNameBytes() { + java.lang.Object ref = tableName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tableName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string table_name = 1; + */ + public Builder setTableName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + tableName_ = value; + onChanged(); + return this; + } + /** + * optional string 
table_name = 1; + */ + public Builder clearTableName() { + bitField0_ = (bitField0_ & ~0x00000001); + tableName_ = getDefaultInstance().getTableName(); + onChanged(); + return this; + } + /** + * optional string table_name = 1; + */ + public Builder setTableNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + tableName_ = value; + onChanged(); + return this; + } + + // repeated string cfs = 2; + private com.google.protobuf.LazyStringList cfs_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private void ensureCfsIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + cfs_ = new com.google.protobuf.LazyStringArrayList(cfs_); + bitField0_ |= 0x00000002; + } + } + /** + * repeated string cfs = 2; + */ + public java.util.List + getCfsList() { + return java.util.Collections.unmodifiableList(cfs_); + } + /** + * repeated string cfs = 2; + */ + public int getCfsCount() { + return cfs_.size(); + } + /** + * repeated string cfs = 2; + */ + public java.lang.String getCfs(int index) { + return cfs_.get(index); + } + /** + * repeated string cfs = 2; + */ + public com.google.protobuf.ByteString + getCfsBytes(int index) { + return cfs_.getByteString(index); + } + /** + * repeated string cfs = 2; + */ + public Builder setCfs( + int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureCfsIsMutable(); + cfs_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string cfs = 2; + */ + public Builder addCfs( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureCfsIsMutable(); + cfs_.add(value); + onChanged(); + return this; + } + /** + * repeated string cfs = 2; + */ + public Builder addAllCfs( + java.lang.Iterable values) { + ensureCfsIsMutable(); + super.addAll(values, cfs_); + onChanged(); + return this; + } + /** + * repeated string cfs = 2; + */ + public Builder clearCfs() { + cfs_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * repeated string cfs = 2; + */ + public Builder addCfsBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureCfsIsMutable(); + cfs_.add(value); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.TableCF) + } + + static { + defaultInstance = new TableCF(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.TableCF) + } + + public interface TableCFsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .hbase.pb.TableCF table_cfs = 1; + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + java.util.List + getTableCfsList(); + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF getTableCfs(int index); + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + int getTableCfsCount(); + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + java.util.List + getTableCfsOrBuilderList(); + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder getTableCfsOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.TableCFs} + */ + public static final class TableCFs extends + com.google.protobuf.GeneratedMessage + implements TableCFsOrBuilder { + // Use 
TableCFs.newBuilder() to construct. + private TableCFs(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableCFs(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableCFs defaultInstance; + public static TableCFs getDefaultInstance() { + return defaultInstance; + } + + public TableCFs getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableCFs( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + tableCfs_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + tableCfs_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + tableCfs_ = java.util.Collections.unmodifiableList(tableCfs_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.class, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableCFs parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableCFs(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .hbase.pb.TableCF table_cfs = 1; + public static final int TABLE_CFS_FIELD_NUMBER = 1; + private java.util.List tableCfs_; + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public java.util.List getTableCfsList() { + return tableCfs_; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public java.util.List + getTableCfsOrBuilderList() { 
+ return tableCfs_; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public int getTableCfsCount() { + return tableCfs_.size(); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF getTableCfs(int index) { + return tableCfs_.get(index); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder getTableCfsOrBuilder( + int index) { + return tableCfs_.get(index); + } + + private void initFields() { + tableCfs_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < tableCfs_.size(); i++) { + output.writeMessage(1, tableCfs_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < tableCfs_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableCfs_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs other = (org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs) obj; + + boolean result = true; + result = result && getTableCfsList() + .equals(other.getTableCfsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getTableCfsCount() > 0) { + hash = (37 * hash) + TABLE_CFS_FIELD_NUMBER; + hash = (53 * hash) + getTableCfsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.TableCFs} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.class, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { 
+ getTableCfsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableCfsBuilder_ == null) { + tableCfs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + tableCfsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs build() { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs result = new org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs(this); + int from_bitField0_ = bitField0_; + if (tableCfsBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + tableCfs_ = java.util.Collections.unmodifiableList(tableCfs_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.tableCfs_ = tableCfs_; + } else { + result.tableCfs_ = tableCfsBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.getDefaultInstance()) return this; + if (tableCfsBuilder_ == null) { + if (!other.tableCfs_.isEmpty()) { + if (tableCfs_.isEmpty()) { + tableCfs_ = other.tableCfs_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTableCfsIsMutable(); + tableCfs_.addAll(other.tableCfs_); + } + onChanged(); + } + } else { + if (!other.tableCfs_.isEmpty()) { + if (tableCfsBuilder_.isEmpty()) { + tableCfsBuilder_.dispose(); + tableCfsBuilder_ = null; + tableCfs_ = other.tableCfs_; + bitField0_ = (bitField0_ & ~0x00000001); + tableCfsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTableCfsFieldBuilder() : null; + } else { + tableCfsBuilder_.addAllMessages(other.tableCfs_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .hbase.pb.TableCF table_cfs = 1; + private java.util.List tableCfs_ = + java.util.Collections.emptyList(); + private void ensureTableCfsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + tableCfs_ = new java.util.ArrayList(tableCfs_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder> tableCfsBuilder_; + + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public java.util.List getTableCfsList() { + if (tableCfsBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableCfs_); + } else { + return tableCfsBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public int getTableCfsCount() { + if (tableCfsBuilder_ == null) { + return tableCfs_.size(); + } else { + return tableCfsBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF getTableCfs(int index) { + if (tableCfsBuilder_ == null) { + return tableCfs_.get(index); + } else { + return tableCfsBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder setTableCfs( + int index, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF value) { + if (tableCfsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableCfsIsMutable(); + tableCfs_.set(index, value); + onChanged(); + } else { + tableCfsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder setTableCfs( + int index, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder builderForValue) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + tableCfs_.set(index, builderForValue.build()); + onChanged(); + } else { + tableCfsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder addTableCfs(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF value) { + if (tableCfsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableCfsIsMutable(); + tableCfs_.add(value); + onChanged(); + } else { + tableCfsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public 
Builder addTableCfs( + int index, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF value) { + if (tableCfsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableCfsIsMutable(); + tableCfs_.add(index, value); + onChanged(); + } else { + tableCfsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder addTableCfs( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder builderForValue) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + tableCfs_.add(builderForValue.build()); + onChanged(); + } else { + tableCfsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder addTableCfs( + int index, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder builderForValue) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + tableCfs_.add(index, builderForValue.build()); + onChanged(); + } else { + tableCfsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder addAllTableCfs( + java.lang.Iterable values) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + super.addAll(values, tableCfs_); + onChanged(); + } else { + tableCfsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder clearTableCfs() { + if (tableCfsBuilder_ == null) { + tableCfs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + tableCfsBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder removeTableCfs(int index) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + tableCfs_.remove(index); + onChanged(); + } else { + tableCfsBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder getTableCfsBuilder( + int index) { + return getTableCfsFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder getTableCfsOrBuilder( + int index) { + if (tableCfsBuilder_ == null) { + return tableCfs_.get(index); } else { + return tableCfsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public java.util.List + getTableCfsOrBuilderList() { + if (tableCfsBuilder_ != null) { + return tableCfsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableCfs_); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder addTableCfsBuilder() { + return getTableCfsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder addTableCfsBuilder( + int index) { + return getTableCfsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + 
public java.util.List + getTableCfsBuilderList() { + return getTableCfsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder> + getTableCfsFieldBuilder() { + if (tableCfsBuilder_ == null) { + tableCfsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder>( + tableCfs_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + tableCfs_ = null; + } + return tableCfsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.TableCFs) + } + + static { + defaultInstance = new TableCFs(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.TableCFs) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_TableCF_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_TableCF_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_TableCFs_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_TableCFs_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\016TableCFs.proto\022\010hbase.pb\"*\n\007TableCF\022\022\n" + + "\ntable_name\030\001 \001(\t\022\013\n\003cfs\030\002 \003(\t\"0\n\010TableC" + + "Fs\022$\n\ttable_cfs\030\001 \003(\0132\021.hbase.pb.TableCF" + + "B@\n*org.apache.hadoop.hbase.protobuf.gen" + + "eratedB\rTableCFsProtoH\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_hbase_pb_TableCF_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_hbase_pb_TableCF_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_TableCF_descriptor, + new java.lang.String[] { "TableName", "Cfs", }); + internal_static_hbase_pb_TableCFs_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_hbase_pb_TableCFs_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_TableCFs_descriptor, + new java.lang.String[] { "TableCfs", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hbase-protocol/src/main/protobuf/TableCFs.proto b/hbase-protocol/src/main/protobuf/TableCFs.proto new file mode 100644 index 0000000..876de43 --- /dev/null +++ b/hbase-protocol/src/main/protobuf/TableCFs.proto @@ -0,0 
+1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Table / column-family pair protos for replication peer config
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "TableCFsProto";
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+message TableCF {
+  optional string table_name = 1;
+  repeated string cfs = 2;
+}
+
+message TableCFs {
+  repeated TableCF table_cfs = 1;
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java
index 0cbbcef..6e8cef3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.replication;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -27,6 +28,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.replication.TableCFsHelper;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 
@@ -43,29 +46,29 @@ public class TableCfWALEntryFilter implements WALEntryFilter {
   public Entry filter(Entry entry) {
     TableName tabName = entry.getKey().getTablename();
     ArrayList<Cell> cells = entry.getEdit().getCells();
-    Map<TableName, List<String>> tableCFs = null;
-
-    try {
-      tableCFs = this.peer.getTableCFs();
-    } catch (IllegalArgumentException e) {
-      LOG.error("should not happen: can't get tableCFs for peer " + peer.getId() +
-          ", degenerate as if it's not configured by keeping tableCFs==null");
+    TableCFsProto.TableCFs tableCFs = this.peer.getTableCFs();
+    if (tableCFs == null) {
+      // There is no znode, or we haven't read it yet; drop the entry.
+      return null;
     }
     int size = cells.size();
+    if (tableCFs.getTableCfsCount() == 0) {
+      // An empty table-CFs list means nothing is filtered out.
+      return entry;
+    }
+
-
-    // return null(prevent replicating) if logKey's table isn't in this peer's
-    // replicable table list (empty tableCFs means all table are replicable)
-    if (tableCFs != null && !tableCFs.containsKey(tabName)) {
+
+    TableCFsProto.TableCF tableCF = TableCFsHelper.getTableCF(tableCFs, tabName.toString());
+    if (tableCF == null) {
       return null;
-    } else {
-      List<String> cfs = (tableCFs == null) ? null : tableCFs.get(tabName);
-      for (int i = size - 1; i >= 0; i--) {
-        Cell cell = cells.get(i);
-        // ignore(remove) kv if its cf isn't in the replicable cf list
-        // (empty cfs means all cfs of this table are replicable)
-        if ((cfs != null && !cfs.contains(Bytes.toString(CellUtil.cloneFamily(cell))))) {
-          cells.remove(i);
-        }
+    }
+    List<String> cfs = tableCF.getCfsList();
+    for (int i = size - 1; i >= 0; i--) {
+      Cell cell = cells.get(i);
+      // ignore(remove) kv if its cf isn't in the replicable cf list
+      // (empty cfs means all cfs of this table are replicable)
+      if ((cfs.size() != 0 && !cfs.contains(Bytes.toString(CellUtil.cloneFamily(cell))))) {
+        cells.remove(i);
       }
     }
     if (cells.size() < size/2) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index e187b9b..1ddcb6a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -170,37 +170,6 @@
   }
 
   @Test
-  public void testGetTableCfsStr() {
-    // opposite of TestPerTableCFReplication#testParseTableCFsFromConfig()
-
-    Map<TableName, List<String>> tabCFsMap = null;
-
-    // 1. null or empty string, result should be null
-    assertEquals(null, ReplicationAdmin.getTableCfsStr(tabCFsMap));
-
-
-    // 2. single table: "tab1" / "tab2:cf1" / "tab3:cf1,cf3"
-    tabCFsMap = new TreeMap<TableName, List<String>>();
-    tabCFsMap.put(TableName.valueOf("tab1"), null);   // its table name is "tab1"
-    assertEquals("tab1", ReplicationAdmin.getTableCfsStr(tabCFsMap));
-
-    tabCFsMap = new TreeMap<TableName, List<String>>();
-    tabCFsMap.put(TableName.valueOf("tab1"), Lists.newArrayList("cf1"));
-    assertEquals("tab1:cf1", ReplicationAdmin.getTableCfsStr(tabCFsMap));
-
-    tabCFsMap = new TreeMap<TableName, List<String>>();
-    tabCFsMap.put(TableName.valueOf("tab1"), Lists.newArrayList("cf1", "cf3"));
-    assertEquals("tab1:cf1,cf3", ReplicationAdmin.getTableCfsStr(tabCFsMap));
-
-    // 3. multiple tables: "tab1 ; tab2:cf1 ; tab3:cf1,cf3"
-    tabCFsMap = new TreeMap<TableName, List<String>>();
-    tabCFsMap.put(TableName.valueOf("tab1"), null);
-    tabCFsMap.put(TableName.valueOf("tab2"), Lists.newArrayList("cf1"));
-    tabCFsMap.put(TableName.valueOf("tab3"), Lists.newArrayList("cf1", "cf3"));
-    assertEquals("tab1;tab2:cf1;tab3:cf1,cf3", ReplicationAdmin.getTableCfsStr(tabCFsMap));
-  }
-
-  @Test
   public void testAppendPeerTableCFs() throws Exception {
     // Add a valid peer
     admin.addPeer(ID_ONE, KEY_ONE);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
index 29a052b..98d8219 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
@@ -19,13 +19,9 @@
  */
 package org.apache.hadoop.hbase.replication;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -47,7 +43,9 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+import org.apache.hadoop.hbase.client.replication.TableCFsHelper;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -58,6 +56,8 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import static org.junit.Assert.*;
+
 @Category({FlakeyTests.class, LargeTests.class})
 public class TestPerTableCFReplication {
 
@@ -270,6 +270,97 @@
     assertTrue(tabCFsMap.get(tab3).contains("cf3"));
   }
 
+  @Test
+  public void testTableCFsHelperConverter() {
+
+    TableCFsProto.TableCFs tableCFs = null;
+    Map<TableName, List<String>> tabCFsMap = null;
+
+    // 1. a null map converts to null; an empty map converts to an empty TableCFs
+    assertNull(TableCFsHelper.convert(tabCFsMap));
+
+    tabCFsMap = new HashMap<TableName, List<String>>();
+    tableCFs = TableCFsHelper.convert(tabCFsMap);
+    assertEquals(0, tableCFs.getTableCfsCount());
+
+    TableName tab1 = TableName.valueOf("tab1");
+    TableName tab2 = TableName.valueOf("tab2");
+    TableName tab3 = TableName.valueOf("tab3");
+
+    // 2. single table: "tab1" / "tab2:cf1" / "tab3:cf1,cf3"
+    tabCFsMap.clear();
+    tabCFsMap.put(tab1, null);
+    tableCFs = TableCFsHelper.convert(tabCFsMap);
+    assertEquals(1, tableCFs.getTableCfsCount()); // only one table
+    assertEquals(tab1.toString(), tableCFs.getTableCfs(0).getTableName());
+    assertEquals(0, tableCFs.getTableCfs(0).getCfsCount());
+
+    tabCFsMap.clear();
+    tabCFsMap.put(tab2, new ArrayList<String>());
+    tabCFsMap.get(tab2).add("cf1");
+    tableCFs = TableCFsHelper.convert(tabCFsMap);
+    assertEquals(1, tableCFs.getTableCfsCount()); // only one table
+    assertEquals(tab2.toString(), tableCFs.getTableCfs(0).getTableName());
+    assertEquals(1, tableCFs.getTableCfs(0).getCfsCount());
+    assertEquals("cf1", tableCFs.getTableCfs(0).getCfs(0));
+
+    tabCFsMap.clear();
+    tabCFsMap.put(tab3, new ArrayList<String>());
+    tabCFsMap.get(tab3).add("cf1");
+    tabCFsMap.get(tab3).add("cf3");
+    tableCFs = TableCFsHelper.convert(tabCFsMap);
+    assertEquals(1, tableCFs.getTableCfsCount());
+    assertEquals(tab3.toString(), tableCFs.getTableCfs(0).getTableName());
+    assertEquals(2, tableCFs.getTableCfs(0).getCfsCount());
+    assertEquals("cf1", tableCFs.getTableCfs(0).getCfs(0));
+    assertEquals("cf3", tableCFs.getTableCfs(0).getCfs(1));
+
+    tabCFsMap.clear();
+    tabCFsMap.put(tab1, null);
+    tabCFsMap.put(tab2, new ArrayList<String>());
+    tabCFsMap.get(tab2).add("cf1");
+    tabCFsMap.put(tab3, new ArrayList<String>());
+    tabCFsMap.get(tab3).add("cf1");
+    tabCFsMap.get(tab3).add("cf3");
+
+    tableCFs = TableCFsHelper.convert(tabCFsMap);
+    assertEquals(3, tableCFs.getTableCfsCount());
+    assertTrue(containTable(tableCFs, tab1.toString()));
+    assertTrue(containTable(tableCFs, tab2.toString()));
+    assertTrue(containTable(tableCFs, tab3.toString()));
+
+    assertEquals(0,
+        TableCFsHelper.getTableCF(tableCFs, tab1.toString()).getCfsCount());
+
+    assertEquals(1,
+        TableCFsHelper.getTableCF(tableCFs, tab2.toString()).getCfsCount());
+    assertEquals("cf1",
+        TableCFsHelper.getTableCF(tableCFs, tab2.toString()).getCfs(0));
+
+    assertEquals(2,
+        TableCFsHelper.getTableCF(tableCFs, tab3.toString()).getCfsCount());
+    assertEquals("cf1",
+        TableCFsHelper.getTableCF(tableCFs, tab3.toString()).getCfs(0));
+    assertEquals("cf3",
+        TableCFsHelper.getTableCF(tableCFs, tab3.toString()).getCfs(1));
+  }
+
+
+  private boolean containTable(TableCFsProto.TableCFs tableCFs,
+      String table) {
+    if (tableCFs == null || table == null) {
+      return false;
+    }
+    for (int i = 0; i < tableCFs.getTableCfsCount(); i++) {
+      TableCFsProto.TableCF tableCF = tableCFs.getTableCfs(i);
+      if (tableCF.getTableName().equals(table)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+
   @Test(timeout=300000)
   public void testPerTableCFReplication() throws Exception {
     LOG.info("testPerTableCFReplication");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
index 22c421d..b394482 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.replication.TableCFsHelper;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.TableName;
@@ -208,20 +209,20 @@ public class TestReplicationWALEntryFilters {
     when(peer.getTableCFs()).thenReturn(null);
     Entry userEntry = createEntry(a, b, c);
     TableCfWALEntryFilter filter = new TableCfWALEntryFilter(peer);
-    assertEquals(createEntry(a,b,c), filter.filter(userEntry));
+    assertEquals(null, filter.filter(userEntry));
 
     // empty map
     userEntry = createEntry(a, b, c);
     Map<TableName, List<String>> tableCfs = new HashMap<TableName, List<String>>();
-    when(peer.getTableCFs()).thenReturn(tableCfs);
+    when(peer.getTableCFs()).thenReturn(TableCFsHelper.convert(tableCfs));
     filter = new TableCfWALEntryFilter(peer);
-    assertEquals(null, filter.filter(userEntry));
+    assertEquals(createEntry(a,b,c), filter.filter(userEntry));
 
     // table bar
     userEntry = createEntry(a, b, c);
     tableCfs = new HashMap<TableName, List<String>>();
     tableCfs.put(TableName.valueOf("bar"), null);
-    when(peer.getTableCFs()).thenReturn(tableCfs);
+    when(peer.getTableCFs()).thenReturn(TableCFsHelper.convert(tableCfs));
     filter = new TableCfWALEntryFilter(peer);
     assertEquals(null, filter.filter(userEntry));
 
@@ -229,7 +230,7 @@
     userEntry = createEntry(a, b, c);
     tableCfs = new HashMap<TableName, List<String>>();
     tableCfs.put(TableName.valueOf("foo"), Lists.newArrayList("a"));
-    when(peer.getTableCFs()).thenReturn(tableCfs);
+    when(peer.getTableCFs()).thenReturn(TableCFsHelper.convert(tableCfs));
     filter = new TableCfWALEntryFilter(peer);
     assertEquals(createEntry(a), filter.filter(userEntry));
 
@@ -237,7 +238,7 @@
     userEntry = createEntry(a, b, c, d);
     tableCfs = new HashMap<TableName, List<String>>();
     tableCfs.put(TableName.valueOf("foo"), Lists.newArrayList("a", "c"));
-    when(peer.getTableCFs()).thenReturn(tableCfs);
+    when(peer.getTableCFs()).thenReturn(TableCFsHelper.convert(tableCfs));
     filter = new TableCfWALEntryFilter(peer);
     assertEquals(createEntry(a,c), filter.filter(userEntry));
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
index a870ed8..7653c9f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster;
@@ -281,7 +282,7 @@
     when(context.getMetrics()).thenReturn(mock(MetricsSource.class));
 
     ReplicationPeer mockPeer = mock(ReplicationPeer.class);
-    when(mockPeer.getTableCFs()).thenReturn(null);
+    when(mockPeer.getTableCFs()).thenReturn(TableCFsProto.TableCFs.newBuilder().build());
     when(context.getReplicationPeer()).thenReturn(mockPeer);
 
     replicator.init(context);
-- 
1.9.3 (Apple Git-50)
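
A usage note for reviewers: the sketch below walks the new PB-based table-CFs API end to end, using only calls that appear in this patch (TableCFsHelper.convert(Map), TableCFsHelper.convert(TableCFs), TableCFsHelper.getTableCF, and the generated TableCFsProto accessors). The wrapper class and printed output are illustrative only, and it is assumed that convert(TableCFs) renders the legacy "table1:cf1,cf2;table2" form that the removed getTableCfsStr produced.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.replication.TableCFsHelper;
import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;

public class TableCFsExample {
  public static void main(String[] args) {
    // Per-table CF config: replicate only cf1/cf3 of tab1, and every CF of tab2.
    Map<TableName, List<String>> tableCfs = new HashMap<TableName, List<String>>();
    List<String> cfs = new ArrayList<String>();
    cfs.add("cf1");
    cfs.add("cf3");
    tableCfs.put(TableName.valueOf("tab1"), cfs);
    tableCfs.put(TableName.valueOf("tab2"), null); // null/empty CFs = all CFs

    // Map -> PB message: what ReplicationPeers now stores for the peer
    // instead of the old semicolon-delimited string.
    TableCFsProto.TableCFs pb = TableCFsHelper.convert(tableCfs);

    // PB -> legacy string form (assumed "tab1:cf1,cf3;tab2"), e.g. for display.
    System.out.println(TableCFsHelper.convert(pb));

    // Per-table lookup, as TableCfWALEntryFilter now does for each WAL entry.
    TableCFsProto.TableCF tableCF = TableCFsHelper.getTableCF(pb, "tab1");
    if (tableCF != null) {
      System.out.println("tab1 cfs: " + tableCF.getCfsList());
    }
  }
}

Storing a PB message rather than the delimited string lets the peer config grow new fields without breaking existing parsers, which is the point of this change.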