From c1575d24657d9fda8befe7a103b04180d29c3a39 Mon Sep 17 00:00:00 2001
From: chenheng
Date: Mon, 2 Nov 2015 17:42:31 +0800
Subject: [PATCH] HBASE-11393 Replication TableCfs should be a PB object rather
 than a string

---
 .../hbase/client/replication/ReplicationAdmin.java |  179 +--
 .../hbase/client/replication/TableCFsHelper.java   |  223 +++
 .../hadoop/hbase/replication/ReplicationPeer.java  |    7 +-
 .../hbase/replication/ReplicationPeerZKImpl.java   |   25 +-
 .../hadoop/hbase/replication/ReplicationPeers.java |   18 +-
 .../hbase/replication/ReplicationPeersZKImpl.java  |   33 +-
 .../hbase/protobuf/generated/TableCFsProto.java    | 1499 ++++++++++++++++++++
 hbase-protocol/src/main/protobuf/TableCFs.proto    |   36 +
 .../hbase/replication/TableCfWALEntryFilter.java   |   45 +-
 .../client/replication/TestReplicationAdmin.java   |   31 -
 .../replication/TestPerTableCFReplication.java     |  140 +-
 .../TestReplicationWALEntryFilters.java            |   13 +-
 ...stRegionReplicaReplicationEndpointNoMaster.java |    3 +-
 13 files changed, 2016 insertions(+), 236 deletions(-)
 create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFsHelper.java
 create mode 100644 hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableCFsProto.java
 create mode 100644 hbase-protocol/src/main/protobuf/TableCFs.proto
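Note: the new hbase-protocol/src/main/protobuf/TableCFs.proto is added later in
this patch, past the portion quoted here. Working backwards from the generated
TableCFsProto.java below, the messages presumably look like the following
sketch; the package line and field names/numbers are taken from the generated
code, while the option and import lines are assumptions inferred from the
generated package and class names, not the literal file contents:

    // Reconstructed sketch of TableCFs.proto; everything not visible in the
    // generated Java below is an assumption.
    package hbase.pb;

    option java_package = "org.apache.hadoop.hbase.protobuf.generated";
    option java_outer_classname = "TableCFsProto";

    import "HBase.proto";  // assumed source of hbase.pb.TableName

    message TableCF {
      optional TableName table_name = 1;
      repeated bytes families = 2;   // empty list means "replicate all families"
    }

    message TableCFs {
      repeated TableCF table_cfs = 1;
    }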
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 8bd1267..8fe45ab 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -29,6 +29,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
+import com.google.protobuf.ByteString;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -45,6 +46,8 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
@@ -186,7 +189,7 @@ public class ReplicationAdmin implements Closeable {
   public void addPeer(String id, String clusterKey, String tableCFs)
     throws ReplicationException {
     this.replicationPeers.addPeer(id,
-      new ReplicationPeerConfig().setClusterKey(clusterKey), tableCFs);
+      new ReplicationPeerConfig().setClusterKey(clusterKey), TableCFsHelper.convert(tableCFs));
   }
 
   /**
@@ -200,77 +203,7 @@ public class ReplicationAdmin implements Closeable {
    */
   public void addPeer(String id, ReplicationPeerConfig peerConfig,
       Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException {
-    this.replicationPeers.addPeer(id, peerConfig, getTableCfsStr(tableCfs));
-  }
-
-  public static Map<TableName, List<String>> parseTableCFsFromConfig(String tableCFsConfig) {
-    if (tableCFsConfig == null || tableCFsConfig.trim().length() == 0) {
-      return null;
-    }
-
-    Map<TableName, List<String>> tableCFsMap = null;
-    // TODO: This should be a PB object rather than a String to be parsed!! See HBASE-11393
-    // parse out (table, cf-list) pairs from tableCFsConfig
-    // format: "table1:cf1,cf2;table2:cfA,cfB"
-    String[] tables = tableCFsConfig.split(";");
-    for (String tab : tables) {
-      // 1 ignore empty table config
-      tab = tab.trim();
-      if (tab.length() == 0) {
-        continue;
-      }
-      // 2 split to "table" and "cf1,cf2"
-      //   for each table: "table:cf1,cf2" or "table"
-      String[] pair = tab.split(":");
-      String tabName = pair[0].trim();
-      if (pair.length > 2 || tabName.length() == 0) {
-        LOG.error("ignore invalid tableCFs setting: " + tab);
-        continue;
-      }
-
-      // 3 parse "cf1,cf2" part to List<cf>
-      List<String> cfs = null;
-      if (pair.length == 2) {
-        String[] cfsList = pair[1].split(",");
-        for (String cf : cfsList) {
-          String cfName = cf.trim();
-          if (cfName.length() > 0) {
-            if (cfs == null) {
-              cfs = new ArrayList<String>();
-            }
-            cfs.add(cfName);
-          }
-        }
-      }
-
-      // 4 put <table, List<cf>> to map
-      if (tableCFsMap == null) {
-        tableCFsMap = new HashMap<TableName, List<String>>();
-      }
-      tableCFsMap.put(TableName.valueOf(tabName), cfs);
-    }
-    return tableCFsMap;
-  }
-
-  @VisibleForTesting
-  static String getTableCfsStr(Map<TableName, ? extends Collection<String>> tableCfs) {
-    String tableCfsStr = null;
-    if (tableCfs != null) {
-      // Format: table1:cf1,cf2;table2:cfA,cfB;table3
-      StringBuilder builder = new StringBuilder();
-      for (Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
-        if (builder.length() > 0) {
-          builder.append(";");
-        }
-        builder.append(entry.getKey());
-        if (entry.getValue() != null && !entry.getValue().isEmpty()) {
-          builder.append(":");
-          builder.append(StringUtils.join(entry.getValue(), ","));
-        }
-      }
-      tableCfsStr = builder.toString();
-    }
-    return tableCfsStr;
+    this.replicationPeers.addPeer(id, peerConfig, TableCFsHelper.convert(tableCfs));
   }
 
   /**
@@ -334,7 +267,7 @@ public class ReplicationAdmin implements Closeable {
    * @param id a short name that identifies the cluster
    */
   public String getPeerTableCFs(String id) throws ReplicationException {
-    return this.replicationPeers.getPeerTableCFsConfig(id);
+    return TableCFsHelper.convert(this.replicationPeers.getPeerTableCFsConfig(id));
   }
 
   /**
@@ -344,7 +277,7 @@ public class ReplicationAdmin implements Closeable {
    */
   @Deprecated
   public void setPeerTableCFs(String id, String tableCFs) throws ReplicationException {
-    this.replicationPeers.setPeerTableCFsConfig(id, tableCFs);
+    this.replicationPeers.setPeerTableCFsConfig(id, TableCFsHelper.convert(tableCFs));
   }
 
   /**
@@ -354,7 +287,7 @@ public class ReplicationAdmin implements Closeable {
    * @throws ReplicationException
    */
   public void appendPeerTableCFs(String id, String tableCfs) throws ReplicationException {
-    appendPeerTableCFs(id, parseTableCFsFromConfig(tableCfs));
+    appendPeerTableCFs(id, TableCFsHelper.parseTableCFsFromConfig(tableCfs));
   }
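For reference, the helper introduced by this patch keeps the old string grammar
as a surface syntax over the new PB object. A minimal sketch of the round trip
(table and family names made up; assumes the hbase-client classes from this
patch are on the classpath):

    // String -> PB: whitespace and empty entries are trimmed/skipped,
    // so this parses to two TableCF entries.
    TableCFsProto.TableCFs pb = TableCFsHelper.convert("table1:cf1,cf2 ; table3");
    // pb.getTableCfsCount() == 2
    // PB -> String: order is preserved, whitespace normalized.
    String back = TableCFsHelper.convert(pb);   // "table1:cf1,cf2;table3"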
@@ -368,33 +301,37 @@ public class ReplicationAdmin implements Closeable {
     if (tableCfs == null) {
       throw new ReplicationException("tableCfs is null");
     }
-    Map<TableName, List<String>> preTableCfs = parseTableCFsFromConfig(getPeerTableCFs(id));
-    if (preTableCfs == null) {
-      setPeerTableCFs(id, tableCfs);
+    TableCFsProto.TableCFs tableCFs = this.replicationPeers.getPeerTableCFsConfig(id);
+    if (tableCFs == null) {
+      this.replicationPeers.setPeerTableCFsConfig(id, TableCFsHelper.convert(tableCfs));
       return;
     }
-
+    TableCFsProto.TableCFs.Builder builder = tableCFs.toBuilder();
     for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
       TableName table = entry.getKey();
       Collection<String> appendCfs = entry.getValue();
-      if (preTableCfs.containsKey(table)) {
-        List<String> cfs = preTableCfs.get(table);
-        if (cfs == null || appendCfs == null) {
-          preTableCfs.put(table, null);
-        } else {
-          Set<String> cfSet = new HashSet<String>(cfs);
-          cfSet.addAll(appendCfs);
-          preTableCfs.put(table, Lists.newArrayList(cfSet));
+      TableCFsProto.TableCF tableCF = TableCFsHelper.getTableCF(tableCFs, table.toString());
+      if (tableCF != null) {
+        if (appendCfs != null) {
+          TableCFsProto.TableCF.Builder tableCFBuilder = tableCF.toBuilder();
+          for (String cf : appendCfs) {
+            // compare as ByteString: the families list holds bytes, not Strings
+            ByteString cfBytes = ByteString.copyFromUtf8(cf);
+            if (!tableCF.getFamiliesList().contains(cfBytes)) {
+              tableCFBuilder.addFamilies(cfBytes);
+            }
+          }
+          // write the merged families back into the new config
+          builder.setTableCfs(TableCFsHelper.getTableCFIndex(tableCFs, table.toString()),
+            tableCFBuilder.build());
         }
       } else {
-        if (appendCfs == null || appendCfs.isEmpty()) {
-          preTableCfs.put(table, null);
-        } else {
-          preTableCfs.put(table, Lists.newArrayList(appendCfs));
+        TableCFsProto.TableCF.Builder tableCFBuilder = TableCFsProto.TableCF.newBuilder();
+        tableCFBuilder.setTableName(ProtobufUtil.toProtoTableName(table));
+        if (appendCfs != null && !appendCfs.isEmpty()) {
+          for (String cf : appendCfs) {
+            tableCFBuilder.addFamilies(ByteString.copyFromUtf8(cf));
+          }
         }
+        builder.addTableCfs(tableCFBuilder.build());
       }
     }
-    setPeerTableCFs(id, preTableCfs);
+    this.replicationPeers.setPeerTableCFsConfig(id, builder.build());
   }
 
   /**
@@ -404,52 +341,60 @@ public class ReplicationAdmin implements Closeable {
    * @throws ReplicationException
    */
   public void removePeerTableCFs(String id, String tableCf) throws ReplicationException {
-    removePeerTableCFs(id, parseTableCFsFromConfig(tableCf));
+    removePeerTableCFs(id, TableCFsHelper.parseTableCFsFromConfig(tableCf));
   }
 
   /**
    * Remove some table-cfs from config of the specified peer
    * @param id a short name that identifies the cluster
-   * @param tableCfs A map from tableName to column family names
+   * @param cfs A map from tableName to column family names
    * @throws ReplicationException
    */
-  public void removePeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
+  public void removePeerTableCFs(String id, Map<TableName, ? extends Collection<String>> cfs)
       throws ReplicationException {
-    if (tableCfs == null) {
-      throw new ReplicationException("tableCfs is null");
+    if (cfs == null) {
+      throw new ReplicationException("cfs is null");
    }
-
-    Map<TableName, List<String>> preTableCfs = parseTableCFsFromConfig(getPeerTableCFs(id));
-    if (preTableCfs == null) {
+    TableCFsProto.TableCFs tableCFs = this.replicationPeers.getPeerTableCFsConfig(id);
+    if (tableCFs == null) {
       throw new ReplicationException("Table-Cfs for peer " + id + " is null");
     }
-    for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
+    TableCFsProto.TableCFs.Builder tableCFsBuilder = tableCFs.toBuilder();
+    for (Map.Entry<TableName, ? extends Collection<String>> entry : cfs.entrySet()) {
       TableName table = entry.getKey();
       Collection<String> removeCfs = entry.getValue();
-      if (preTableCfs.containsKey(table)) {
-        List<String> cfs = preTableCfs.get(table);
-        if (cfs == null && removeCfs == null) {
-          preTableCfs.remove(table);
-        } else if (cfs != null && removeCfs != null) {
-          Set<String> cfSet = new HashSet<String>(cfs);
-          cfSet.removeAll(removeCfs);
-          if (cfSet.isEmpty()) {
-            preTableCfs.remove(table);
-          } else {
-            preTableCfs.put(table, Lists.newArrayList(cfSet));
+      TableCFsProto.TableCF tableCF = TableCFsHelper.getTableCF(tableCFs, table.toString());
+      if (tableCF != null) {
+        // remove the whole table
+        if (tableCF.getFamiliesCount() == 0 && removeCfs == null) {
+          tableCFsBuilder.removeTableCfs(
+              TableCFsHelper.getTableCFIndex(tableCFs, table.toString()));
+        } else if (tableCF.getFamiliesCount() > 0 && removeCfs != null) {
+          TableCFsProto.TableCF.Builder tableCFBuilder = tableCF.toBuilder();
+          List<ByteString> families = new ArrayList<>(tableCF.getFamiliesList());
+          tableCFBuilder.clearFamilies();
+          for (ByteString cf : families) {
+            if (!removeCfs.contains(cf.toStringUtf8())) {
+              tableCFBuilder.addFamilies(cf);
+            }
+          }
+          if (tableCFBuilder.getFamiliesCount() == 0) {
+            // no families left: drop the table entirely
+            tableCFsBuilder.removeTableCfs(
+                TableCFsHelper.getTableCFIndex(tableCFs, table.toString()));
+          } else {
+            // keep the table with its remaining families
+            tableCFsBuilder.setTableCfs(
+                TableCFsHelper.getTableCFIndex(tableCFs, table.toString()),
+                tableCFBuilder.build());
          }
-        } else if (cfs == null && removeCfs != null) {
+        } else if (tableCF.getFamiliesCount() == 0 && removeCfs != null) {
           throw new ReplicationException("Cannot remove cf of table: " + table
               + " which doesn't specify cfs from table-cfs config in peer: " + id);
-        } else if (cfs != null && removeCfs == null) {
+        } else if (tableCF.getFamiliesCount() > 0 && removeCfs == null) {
           throw new ReplicationException("Cannot remove table: " + table
               + " which has specified cfs from table-cfs config in peer: " + id);
         }
       } else {
-        throw new ReplicationException("No table: " + table + " in table-cfs config of peer: " + id);
+        throw new ReplicationException("No table: "
            + table + " in table-cfs config of peer: " + id);
      }
    }
-    setPeerTableCFs(id, preTableCfs);
+    this.replicationPeers.setPeerTableCFsConfig(id, tableCFsBuilder.build());
   }
 
   /**
@@ -462,7 +407,7 @@ public class ReplicationAdmin implements Closeable {
    */
   public void setPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
       throws ReplicationException {
-    this.replicationPeers.setPeerTableCFsConfig(id, getTableCfsStr(tableCfs));
+    this.replicationPeers.setPeerTableCFsConfig(id, TableCFsHelper.convert(tableCfs));
   }
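The client-facing API is unchanged by this patch: callers still pass a map, and
ReplicationAdmin converts it to a PB TableCFs internally. A minimal usage
snippet (peer id and table/family names here are made up; assumes an open
ReplicationAdmin named admin):

    Map<TableName, List<String>> tableCfs = new HashMap<TableName, List<String>>();
    tableCfs.put(TableName.valueOf("table1"), Arrays.asList("cf1", "cf2"));
    tableCfs.put(TableName.valueOf("table3"), null);  // null: replicate all families
    admin.setPeerTableCFs("1", tableCfs);             // stored in ZK as a PB blob
    admin.appendPeerTableCFs("1", "table2:cfA");      // legacy string form still accepted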
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFsHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFsHelper.java
new file mode 100644
index 0000000..cf2a9b6
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/TableCFsHelper.java
@@ -0,0 +1,223 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client.replication;
+
+import com.google.protobuf.ByteString;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Helper for TableCFs operations.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TableCFsHelper {
+
+  private TableCFsHelper() {}
+
+  /** Convert a map to a TableCFs object. */
+  public static TableCFsProto.TableCFs convert(
+      Map<TableName, ? extends Collection<String>> tableCfs) {
+    if (tableCfs == null) {
+      return null;
+    }
+    TableCFsProto.TableCFs.Builder tableCFsBuilder = TableCFsProto.TableCFs.newBuilder();
+    TableCFsProto.TableCF.Builder tableCFBuilder = TableCFsProto.TableCF.newBuilder();
+    for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
+      tableCFBuilder.clear();
+      tableCFBuilder.setTableName(ProtobufUtil.toProtoTableName(entry.getKey()));
+      if (entry.getValue() != null && !entry.getValue().isEmpty()) {
+        for (String value : entry.getValue()) {
+          tableCFBuilder.addFamilies(ByteString.copyFromUtf8(value));
+        }
+      }
+      tableCFsBuilder.addTableCfs(tableCFBuilder.build());
+    }
+    return tableCFsBuilder.build();
+  }
+
+  /**
+   * Convert a string to a TableCFs object.
+   * Input string format: table1:cf1,cf2;table2:cfA,cfB;table3
+   */
+  public static TableCFsProto.TableCFs convert(String tableCFsConfig) {
+    if (tableCFsConfig == null || tableCFsConfig.trim().length() == 0) {
+      return null;
+    }
+
+    TableCFsProto.TableCFs.Builder tableCFsBuilder = TableCFsProto.TableCFs.newBuilder();
+    TableCFsProto.TableCF.Builder tableCFBuilder = TableCFsProto.TableCF.newBuilder();
+    // parse out (table, cf-list) pairs from tableCFsConfig
+    // format: "table1:cf1,cf2;table2:cfA,cfB"
+    String[] tables = tableCFsConfig.split(";");
+    for (String tab : tables) {
+      // 1 ignore empty table config
+      tab = tab.trim();
+      if (tab.length() == 0) {
+        continue;
+      }
+      // 2 split to "table" and "cf1,cf2"
+      //   for each table: "table:cf1,cf2" or "table"
+      String[] pair = tab.split(":");
+      String tabName = pair[0].trim();
+      if (pair.length > 2 || tabName.length() == 0) {
+        continue;
+      }
+
+      tableCFBuilder.clear();
+      tableCFBuilder.setTableName(
+          ProtobufUtil.toProtoTableName(TableName.valueOf(tabName)));
+      // 3 parse "cf1,cf2" part to List<cf>
+      if (pair.length == 2) {
+        String[] cfsList = pair[1].split(",");
+        for (String cf : cfsList) {
+          String cfName = cf.trim();
+          if (cfName.length() > 0) {
+            tableCFBuilder.addFamilies(ByteString.copyFromUtf8(cfName));
+          }
+        }
+      }
+      tableCFsBuilder.addTableCfs(tableCFBuilder.build());
+    }
+    return tableCFsBuilder.build();
+  }
+
+  /**
+   * Convert a TableCFs object to a string.
+   * Output string format: table1:cf1,cf2;table2:cfA,cfB;table3
+   */
+  public static String convert(TableCFsProto.TableCFs tableCFs) {
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < tableCFs.getTableCfsCount(); i++) {
+      TableCFsProto.TableCF tableCF = tableCFs.getTableCfs(i);
+      sb.append(tableCF.getTableName().getQualifier().toStringUtf8());
+      if (tableCF.getFamiliesCount() > 0) {
+        sb.append(":");
+        for (int j = 0; j < tableCF.getFamiliesCount(); j++) {
+          sb.append(tableCF.getFamilies(j).toStringUtf8());
+          sb.append(",");
+        }
+        sb.deleteCharAt(sb.length() - 1);
+      }
+      sb.append(";");
+    }
+    if (sb.length() > 0) {
+      sb.deleteCharAt(sb.length() - 1);
+    }
+    return sb.toString();
+  }
+
+  /**
+   * Get TableCF in TableCFs, if not exist, return null.
+   */
+  public static TableCFsProto.TableCF getTableCF(TableCFsProto.TableCFs tableCFs,
+      String table) {
+    for (int i = 0; i < tableCFs.getTableCfsCount(); i++) {
+      TableCFsProto.TableCF tableCF = tableCFs.getTableCfs(i);
+      if (tableCF.getTableName().getQualifier().toStringUtf8().equals(table)) {
+        return tableCF;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Get index of TableCF in TableCFs, if not exist, return -1.
+   */
+  public static int getTableCFIndex(TableCFsProto.TableCFs tableCFs,
+      String table) {
+    for (int i = 0; i < tableCFs.getTableCfsCount(); i++) {
+      TableCFsProto.TableCF tableCF = tableCFs.getTableCfs(i);
+      if (tableCF.getTableName().getQualifier().toStringUtf8().equals(table)) {
+        return i;
+      }
+    }
+    return -1;
+  }
+
+  /**
+   * Convert TableCFs to bytes, with PB_MAGIC prepended as a header.
+   */
+  public static byte[] toByteArray(final TableCFsProto.TableCFs tableCFs) {
+    if (tableCFs != null) {
+      return ProtobufUtil.prependPBMagic(tableCFs.toByteArray());
+    } else {
+      return ProtobufMagic.PB_MAGIC;
+    }
+  }
+
+  /**
+   * Parse bytes into TableCFs.
+   * Also used for backward compatibility: old-format bytes have no PB_MAGIC
+   * header and hold the config as a plain string.
+   */
+  public static TableCFsProto.TableCFs parseTableCFs(byte[] bytes) throws IOException {
+    if (bytes == null) {
+      return null;
+    }
+    if (ProtobufUtil.isPBMagicPrefix(bytes)) {
+      int pblen = ProtobufUtil.lengthOfPBMagic();
+      TableCFsProto.TableCFs.Builder builder = TableCFsProto.TableCFs.newBuilder();
+      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
+      return builder.build();
+    } else {
+      // Old format is a string; parse it.
+      return TableCFsHelper.convert(Bytes.toString(bytes));
+    }
+  }
+
+  /**
+   * Convert a tableCFs string into a map.
+   */
+  public static Map<TableName, List<String>> parseTableCFsFromConfig(String tableCFsConfig) {
+    TableCFsProto.TableCFs tableCFs = convert(tableCFsConfig);
+    if (tableCFs == null) {
+      return null;
+    }
+    Map<TableName, List<String>> tableCFsMap = new HashMap<TableName, List<String>>();
+    for (int i = 0; i < tableCFs.getTableCfsCount(); i++) {
+      TableCFsProto.TableCF tableCF = tableCFs.getTableCfs(i);
+      List<String> families = new ArrayList<>();
+      for (int j = 0; j < tableCF.getFamiliesCount(); j++) {
+        families.add(tableCF.getFamilies(j).toStringUtf8());
+      }
+      if (families.size() > 0) {
+        tableCFsMap.put(ProtobufUtil.toTableName(tableCF.getTableName()), families);
+      } else {
+        tableCFsMap.put(ProtobufUtil.toTableName(tableCF.getTableName()), null);
+      }
+    }
+    return tableCFsMap;
+  }
+
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
index b8b5b22..bb5059c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java
@@ -17,13 +17,10 @@
  */
 package org.apache.hadoop.hbase.replication;
 
-import java.util.List;
-import java.util.Map;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 
 /**
  * ReplicationPeer manages enabled / disabled state for the peer.
@@ -68,6 +65,6 @@ public interface ReplicationPeer {
    * Get replicable (table, cf-list) map of this peer
    * @return the replicable (table, cf-list) map
    */
-  public Map<TableName, List<String>> getTableCFs();
+  public TableCFsProto.TableCFs getTableCFs();
 
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
index 3ac8007..d4b44bd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
@@ -20,21 +20,18 @@ package org.apache.hadoop.hbase.replication;
 
 import java.io.Closeable;
 import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+import org.apache.hadoop.hbase.client.replication.TableCFsHelper;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -48,7 +45,7 @@ public class ReplicationPeerZKImpl implements ReplicationPeer, Abortable, Closea
   private final ReplicationPeerConfig peerConfig;
   private final String id;
   private volatile PeerState peerState;
-  private volatile Map<TableName, List<String>> tableCFs = new HashMap<TableName, List<String>>();
+  private volatile TableCFsProto.TableCFs tableCFs = null;
   private final Configuration conf;
 
   private PeerStateTracker peerStateTracker;
@@ -110,8 +107,16 @@
   }
 
   private void readTableCFsZnode() {
-    String currentTableCFs = Bytes.toString(tableCFsTracker.getData(false));
-    this.tableCFs = ReplicationAdmin.parseTableCFsFromConfig(currentTableCFs);
+    try {
+      byte[] bytes = tableCFsTracker.getData(false);
+      if (bytes != null) {
+        this.tableCFs = TableCFsHelper.parseTableCFs(bytes);
+      }
+    } catch (IOException e) {
+      // covers InvalidProtocolBufferException as well
+      LOG.error("Failed to parse tableCFs znode data for peer " + id, e);
+    }
   }
 
   @Override
@@ -151,7 +156,7 @@
    * @return the replicable (table, cf-list) map
    */
   @Override
-  public Map<TableName, List<String>> getTableCFs() {
+  public TableCFsProto.TableCFs getTableCFs() {
     return this.tableCFs;
   }
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
index 8e80e06..aef5124 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java
@@ -23,8 +23,8 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 import org.apache.hadoop.hbase.util.Pair;
 
 /**
@@ -52,7 +52,8 @@ public interface ReplicationPeers {
    * @param tableCFs the table and column-family list which will be replicated for this peer,
    *          or null for all tables and column families
    */
-  void addPeer(String peerId, ReplicationPeerConfig peerConfig, String tableCFs)
+  void addPeer(String peerId, ReplicationPeerConfig peerConfig,
+      TableCFsProto.TableCFs tableCFs)
       throws ReplicationException;
 
   /**
@@ -81,21 +82,16 @@ public interface ReplicationPeers {
    * Get the table and column-family list of the peer from ZK.
    * @param peerId a short name that identifies the cluster
    */
-  public String getPeerTableCFsConfig(String peerId) throws ReplicationException;
+  public TableCFsProto.TableCFs getPeerTableCFsConfig(String peerId)
+      throws ReplicationException;
 
   /**
    * Set the table and column-family list of the peer to ZK.
    * @param peerId a short name that identifies the cluster
    * @param tableCFs the table and column-family list which will be replicated for this peer
    */
-  public void setPeerTableCFsConfig(String peerId, String tableCFs) throws ReplicationException;
-
-  /**
-   * Get the table and column-family-list map of the peer.
-   * @param peerId a short name that identifies the cluster
-   * @return the table and column-family list which will be replicated for this peer
-   */
-  public Map<TableName, List<String>> getTableCFs(String peerId);
+  public void setPeerTableCFsConfig(String peerId, TableCFsProto.TableCFs tableCFs)
+      throws ReplicationException;
 
   /**
    * Returns the ReplicationPeer
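With these interface changes, the per-peer tableCFs znode holds PB bytes rather
than a UTF-8 string. A sketch of reading it back (the znode path shown assumes
the default layout, and zookeeper is an open ZooKeeperWatcher):

    // Peers created by old clients may still hold a raw string such as
    // "table1:cf1,cf2"; parseTableCFs falls back to the string parser for those.
    byte[] data = ZKUtil.getData(zookeeper, "/hbase/replication/peers/1/tableCFs");
    TableCFsProto.TableCFs cfg = TableCFsHelper.parseTableCFs(data);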
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 1884469..61b8e79 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -33,11 +33,13 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.CompoundConfiguration;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.replication.TableCFsHelper;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
+import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -106,7 +108,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
   }
 
   @Override
-  public void addPeer(String id, ReplicationPeerConfig peerConfig, String tableCFs)
+  public void addPeer(String id, ReplicationPeerConfig peerConfig, TableCFsProto.TableCFs tableCFs)
       throws ReplicationException {
     try {
       if (peerExists(id)) {
@@ -129,8 +131,8 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
       // peer-state znode. This happens while adding a peer
       // The peer state data is set as "ENABLED" by default.
       ZKUtilOp op2 = ZKUtilOp.createAndFailSilent(getPeerStateNode(id), ENABLED_ZNODE_BYTES);
-      String tableCFsStr = (tableCFs == null) ? "" : tableCFs;
-      ZKUtilOp op3 = ZKUtilOp.createAndFailSilent(getTableCFsNode(id), Bytes.toBytes(tableCFsStr));
+      ZKUtilOp op3 = ZKUtilOp.createAndFailSilent(getTableCFsNode(id),
+        TableCFsHelper.toByteArray(tableCFs));
       listOfOps.add(op1);
       listOfOps.add(op2);
       listOfOps.add(op3);
@@ -168,13 +170,13 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
   }
 
   @Override
-  public String getPeerTableCFsConfig(String id) throws ReplicationException {
+  public TableCFsProto.TableCFs getPeerTableCFsConfig(String id) throws ReplicationException {
     try {
       if (!peerExists(id)) {
         throw new IllegalArgumentException("peer " + id + " doesn't exist");
       }
       try {
-        return Bytes.toString(ZKUtil.getData(this.zookeeper, getTableCFsNode(id)));
+        return TableCFsHelper.parseTableCFs(ZKUtil.getData(this.zookeeper, getTableCFsNode(id)));
       } catch (Exception e) {
         throw new ReplicationException(e);
       }
@@ -184,35 +186,26 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
   }
 
   @Override
-  public void setPeerTableCFsConfig(String id, String tableCFsStr) throws ReplicationException {
+  public void setPeerTableCFsConfig(String id, TableCFsProto.TableCFs tableCFs)
+      throws ReplicationException {
     try {
       if (!peerExists(id)) {
         throw new IllegalArgumentException("Cannot set peer tableCFs because id=" + id
             + " does not exist.");
       }
       String tableCFsZKNode = getTableCFsNode(id);
-      byte[] tableCFs = Bytes.toBytes(tableCFsStr);
       if (ZKUtil.checkExists(this.zookeeper, tableCFsZKNode) != -1) {
-        ZKUtil.setData(this.zookeeper, tableCFsZKNode, tableCFs);
+        ZKUtil.setData(this.zookeeper, tableCFsZKNode, TableCFsHelper.toByteArray(tableCFs));
       } else {
-        ZKUtil.createAndWatch(this.zookeeper, tableCFsZKNode, tableCFs);
+        ZKUtil.createAndWatch(this.zookeeper, tableCFsZKNode, TableCFsHelper.toByteArray(tableCFs));
       }
-      LOG.info("Peer tableCFs with id= " + id + " is now " + tableCFsStr);
+      LOG.info("Peer tableCFs with id=" + id + " is now " + TableCFsHelper.convert(tableCFs));
     } catch (KeeperException e) {
       throw new ReplicationException("Unable to change tableCFs of the peer with id=" + id, e);
     }
   }
 
   @Override
-  public Map<TableName, List<String>> getTableCFs(String id) throws IllegalArgumentException {
-    ReplicationPeer replicationPeer = this.peerClusters.get(id);
-    if (replicationPeer == null) {
-      throw new IllegalArgumentException("Peer with id= " + id + " is not connected");
-    }
-    return replicationPeer.getTableCFs();
-  }
-
-  @Override
   public boolean getStatusOfPeer(String id) {
     ReplicationPeer replicationPeer = this.peerClusters.get(id);
     if (replicationPeer == null) {
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableCFsProto.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableCFsProto.java
new file mode 100644
index 0000000..b172349
--- /dev/null
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/TableCFsProto.java
@@ -0,0 +1,1499 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: TableCFs.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class TableCFsProto { + private TableCFsProto() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface TableCFOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .hbase.pb.TableName table_name = 1; + /** + * optional .hbase.pb.TableName table_name = 1; + */ + boolean hasTableName(); + /** + * optional .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * optional .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + // repeated bytes families = 2; + /** + * repeated bytes families = 2; + */ + java.util.List getFamiliesList(); + /** + * repeated bytes families = 2; + */ + int getFamiliesCount(); + /** + * repeated bytes families = 2; + */ + com.google.protobuf.ByteString getFamilies(int index); + } + /** + * Protobuf type {@code hbase.pb.TableCF} + */ + public static final class TableCF extends + com.google.protobuf.GeneratedMessage + implements TableCFOrBuilder { + // Use TableCF.newBuilder() to construct. + private TableCF(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableCF(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableCF defaultInstance; + public static TableCF getDefaultInstance() { + return defaultInstance; + } + + public TableCF getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableCF( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + families_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + families_.add(input.readBytes()); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 
0x00000002)) { + families_ = java.util.Collections.unmodifiableList(families_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.class, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableCF parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableCF(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .hbase.pb.TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // repeated bytes families = 2; + public static final int FAMILIES_FIELD_NUMBER = 2; + private java.util.List families_; + /** + * repeated bytes families = 2; + */ + public java.util.List + getFamiliesList() { + return families_; + } + /** + * repeated bytes families = 2; + */ + public int getFamiliesCount() { + return families_.size(); + } + /** + * repeated bytes families = 2; + */ + public com.google.protobuf.ByteString getFamilies(int index) { + return families_.get(index); + } + + private void initFields() { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + families_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasTableName()) { + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableName_); + } + for (int i = 0; i < families_.size(); i++) { + output.writeBytes(2, families_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ 
& 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_); + } + { + int dataSize = 0; + for (int i = 0; i < families_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(families_.get(i)); + } + size += dataSize; + size += 1 * getFamiliesList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF other = (org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && getFamiliesList() + .equals(other.getFamiliesList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (getFamiliesCount() > 0) { + hash = (37 * hash) + FAMILIES_FIELD_NUMBER; + hash = (53 * hash) + getFamiliesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.TableCF} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.class, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + families_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCF_descriptor; + } + + 
public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF build() { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF result = new org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + families_ = java.util.Collections.unmodifiableList(families_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.families_ = families_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.getDefaultInstance()) return this; + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (!other.families_.isEmpty()) { + if (families_.isEmpty()) { + families_ = other.families_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureFamiliesIsMutable(); + families_.addAll(other.families_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasTableName()) { + if (!getTableName().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .hbase.pb.TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * optional .hbase.pb.TableName 
table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * optional .hbase.pb.TableName table_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // repeated bytes families = 2; 
+ private java.util.List families_ = java.util.Collections.emptyList(); + private void ensureFamiliesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + families_ = new java.util.ArrayList(families_); + bitField0_ |= 0x00000002; + } + } + /** + * repeated bytes families = 2; + */ + public java.util.List + getFamiliesList() { + return java.util.Collections.unmodifiableList(families_); + } + /** + * repeated bytes families = 2; + */ + public int getFamiliesCount() { + return families_.size(); + } + /** + * repeated bytes families = 2; + */ + public com.google.protobuf.ByteString getFamilies(int index) { + return families_.get(index); + } + /** + * repeated bytes families = 2; + */ + public Builder setFamilies( + int index, com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFamiliesIsMutable(); + families_.set(index, value); + onChanged(); + return this; + } + /** + * repeated bytes families = 2; + */ + public Builder addFamilies(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureFamiliesIsMutable(); + families_.add(value); + onChanged(); + return this; + } + /** + * repeated bytes families = 2; + */ + public Builder addAllFamilies( + java.lang.Iterable values) { + ensureFamiliesIsMutable(); + super.addAll(values, families_); + onChanged(); + return this; + } + /** + * repeated bytes families = 2; + */ + public Builder clearFamilies() { + families_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.TableCF) + } + + static { + defaultInstance = new TableCF(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.TableCF) + } + + public interface TableCFsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .hbase.pb.TableCF table_cfs = 1; + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + java.util.List + getTableCfsList(); + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF getTableCfs(int index); + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + int getTableCfsCount(); + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + java.util.List + getTableCfsOrBuilderList(); + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder getTableCfsOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.TableCFs} + */ + public static final class TableCFs extends + com.google.protobuf.GeneratedMessage + implements TableCFsOrBuilder { + // Use TableCFs.newBuilder() to construct. 
+ private TableCFs(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableCFs(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableCFs defaultInstance; + public static TableCFs getDefaultInstance() { + return defaultInstance; + } + + public TableCFs getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableCFs( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + tableCfs_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + tableCfs_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + tableCfs_ = java.util.Collections.unmodifiableList(tableCfs_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.class, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableCFs parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableCFs(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .hbase.pb.TableCF table_cfs = 1; + public static final int TABLE_CFS_FIELD_NUMBER = 1; + private java.util.List tableCfs_; + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public java.util.List getTableCfsList() { + return tableCfs_; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public java.util.List + getTableCfsOrBuilderList() { + return tableCfs_; + } + /** + * 
repeated .hbase.pb.TableCF table_cfs = 1; + */ + public int getTableCfsCount() { + return tableCfs_.size(); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF getTableCfs(int index) { + return tableCfs_.get(index); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder getTableCfsOrBuilder( + int index) { + return tableCfs_.get(index); + } + + private void initFields() { + tableCfs_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getTableCfsCount(); i++) { + if (!getTableCfs(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < tableCfs_.size(); i++) { + output.writeMessage(1, tableCfs_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < tableCfs_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableCfs_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs other = (org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs) obj; + + boolean result = true; + result = result && getTableCfsList() + .equals(other.getTableCfsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getTableCfsCount() > 0) { + hash = (37 * hash) + TABLE_CFS_FIELD_NUMBER; + hash = (53 * hash) + getTableCfsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.TableCFs} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.class, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void 
maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableCfsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableCfsBuilder_ == null) { + tableCfs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + tableCfsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.internal_static_hbase_pb_TableCFs_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs build() { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs result = new org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs(this); + int from_bitField0_ = bitField0_; + if (tableCfsBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + tableCfs_ = java.util.Collections.unmodifiableList(tableCfs_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.tableCfs_ = tableCfs_; + } else { + result.tableCfs_ = tableCfsBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs.getDefaultInstance()) return this; + if (tableCfsBuilder_ == null) { + if (!other.tableCfs_.isEmpty()) { + if (tableCfs_.isEmpty()) { + tableCfs_ = other.tableCfs_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTableCfsIsMutable(); + tableCfs_.addAll(other.tableCfs_); + } + onChanged(); + } + } else { + if (!other.tableCfs_.isEmpty()) { + if (tableCfsBuilder_.isEmpty()) { + tableCfsBuilder_.dispose(); + tableCfsBuilder_ = null; + tableCfs_ = other.tableCfs_; + bitField0_ = (bitField0_ & ~0x00000001); + tableCfsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTableCfsFieldBuilder() : null; + } else { + tableCfsBuilder_.addAllMessages(other.tableCfs_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getTableCfsCount(); i++) { + if (!getTableCfs(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFs) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .hbase.pb.TableCF table_cfs = 1; + private java.util.List tableCfs_ = + java.util.Collections.emptyList(); + private void ensureTableCfsIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + tableCfs_ = new java.util.ArrayList(tableCfs_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder> tableCfsBuilder_; + + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public java.util.List getTableCfsList() { + if (tableCfsBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableCfs_); + } else { + return tableCfsBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public int getTableCfsCount() { + if (tableCfsBuilder_ == null) { + return tableCfs_.size(); + } else { + return tableCfsBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF getTableCfs(int index) { + if (tableCfsBuilder_ == null) { + return tableCfs_.get(index); + } else { + return tableCfsBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder setTableCfs( + int index, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF value) { + if (tableCfsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableCfsIsMutable(); + tableCfs_.set(index, value); + onChanged(); + } else { + tableCfsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder setTableCfs( + int index, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder builderForValue) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + tableCfs_.set(index, builderForValue.build()); + onChanged(); + } else { + tableCfsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder addTableCfs(org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF value) { + if (tableCfsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableCfsIsMutable(); + tableCfs_.add(value); + onChanged(); + } else { + 
tableCfsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder addTableCfs( + int index, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF value) { + if (tableCfsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableCfsIsMutable(); + tableCfs_.add(index, value); + onChanged(); + } else { + tableCfsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder addTableCfs( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder builderForValue) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + tableCfs_.add(builderForValue.build()); + onChanged(); + } else { + tableCfsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder addTableCfs( + int index, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder builderForValue) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + tableCfs_.add(index, builderForValue.build()); + onChanged(); + } else { + tableCfsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder addAllTableCfs( + java.lang.Iterable values) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + super.addAll(values, tableCfs_); + onChanged(); + } else { + tableCfsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder clearTableCfs() { + if (tableCfsBuilder_ == null) { + tableCfs_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + tableCfsBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public Builder removeTableCfs(int index) { + if (tableCfsBuilder_ == null) { + ensureTableCfsIsMutable(); + tableCfs_.remove(index); + onChanged(); + } else { + tableCfsBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder getTableCfsBuilder( + int index) { + return getTableCfsFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder getTableCfsOrBuilder( + int index) { + if (tableCfsBuilder_ == null) { + return tableCfs_.get(index); } else { + return tableCfsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public java.util.List + getTableCfsOrBuilderList() { + if (tableCfsBuilder_ != null) { + return tableCfsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableCfs_); + } + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder addTableCfsBuilder() { + return getTableCfsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder addTableCfsBuilder( + int index) { + return getTableCfsFieldBuilder().addBuilder( + index, 
org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableCF table_cfs = 1; + */ + public java.util.List + getTableCfsBuilderList() { + return getTableCfsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder> + getTableCfsFieldBuilder() { + if (tableCfsBuilder_ == null) { + tableCfsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCF.Builder, org.apache.hadoop.hbase.protobuf.generated.TableCFsProto.TableCFOrBuilder>( + tableCfs_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + tableCfs_ = null; + } + return tableCfsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.TableCFs) + } + + static { + defaultInstance = new TableCFs(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.TableCFs) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_TableCF_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_TableCF_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_TableCFs_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_TableCFs_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\016TableCFs.proto\022\010hbase.pb\032\013HBase.proto\"" + + "D\n\007TableCF\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.p" + + "b.TableName\022\020\n\010families\030\002 \003(\014\"0\n\010TableCF" + + "s\022$\n\ttable_cfs\030\001 \003(\0132\021.hbase.pb.TableCFB" + + "@\n*org.apache.hadoop.hbase.protobuf.gene" + + "ratedB\rTableCFsProtoH\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_hbase_pb_TableCF_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_hbase_pb_TableCF_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_TableCF_descriptor, + new java.lang.String[] { "TableName", "Families", }); + internal_static_hbase_pb_TableCFs_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_hbase_pb_TableCFs_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_TableCFs_descriptor, + new java.lang.String[] { "TableCfs", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), + }, assigner); + } + 
+ // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hbase-protocol/src/main/protobuf/TableCFs.proto b/hbase-protocol/src/main/protobuf/TableCFs.proto new file mode 100644 index 0000000..b27310e --- /dev/null +++ b/hbase-protocol/src/main/protobuf/TableCFs.proto @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Protos describing which tables and column families to replicate +package hbase.pb; + +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "TableCFsProto"; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; + +message TableCF { + optional TableName table_name = 1; + repeated bytes families = 2; +} + +message TableCFs { + repeated TableCF table_cfs = 1; +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java index 0cbbcef..4316714 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/TableCfWALEntryFilter.java @@ -20,13 +20,15 @@ package org.apache.hadoop.hbase.replication; import java.util.ArrayList; import java.util.List; -import java.util.Map; +import com.google.protobuf.ByteString; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.replication.TableCFsHelper; +import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL.Entry; @@ -43,29 +45,32 @@ public class TableCfWALEntryFilter implements WALEntryFilter { public Entry filter(Entry entry) { TableName tabName = entry.getKey().getTablename(); ArrayList cells = entry.getEdit().getCells(); - Map> tableCFs = null; - - try { - tableCFs = this.peer.getTableCFs(); - } catch (IllegalArgumentException e) { - LOG.error("should not happen: can't get tableCFs for peer " + peer.getId() + - ", degenerate as if it's not configured by keeping tableCFs==null"); + TableCFsProto.TableCFs tableCFs = this.peer.getTableCFs(); + if (tableCFs == null) { + // There is no znode, or we haven't read it yet; don't replicate anything.
+ return null; + } int size = cells.size(); + if (tableCFs.getTableCfsCount() == 0) { + // An empty table-CF list means nothing is filtered; replicate everything. + return entry; + } - // return null(prevent replicating) if logKey's table isn't in this peer's - // replicable table list (empty tableCFs means all table are replicable) - if (tableCFs != null && !tableCFs.containsKey(tabName)) { + + TableCFsProto.TableCF tableCF = TableCFsHelper.getTableCF(tableCFs, tabName.toString()); + if (tableCF == null) { return null; - } else { - List cfs = (tableCFs == null) ? null : tableCFs.get(tabName); - for (int i = size - 1; i >= 0; i--) { - Cell cell = cells.get(i); - // ignore(remove) kv if its cf isn't in the replicable cf list - // (empty cfs means all cfs of this table are replicable) - if ((cfs != null && !cfs.contains(Bytes.toString(CellUtil.cloneFamily(cell))))) { - cells.remove(i); - } + } + List cfs = new ArrayList<>(tableCF.getFamiliesCount()); + for (ByteString bs : tableCF.getFamiliesList()) { + cfs.add(bs.toStringUtf8()); + } + for (int i = size - 1; i >= 0; i--) { + Cell cell = cells.get(i); + // ignore(remove) kv if its cf isn't in the replicable cf list + // (empty cfs means all cfs of this table are replicable) + if ((cfs.size() != 0 && !cfs.contains(Bytes.toString(CellUtil.cloneFamily(cell))))) { + cells.remove(i); } } if (cells.size() < size/2) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java index e187b9b..1ddcb6a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java @@ -170,37 +170,6 @@ public class TestReplicationAdmin { } @Test - public void testGetTableCfsStr() { - // opposite of TestPerTableCFReplication#testParseTableCFsFromConfig() - - Map> tabCFsMap = null; - - // 1. null or empty string, result should be null - assertEquals(null, ReplicationAdmin.getTableCfsStr(tabCFsMap)); - - - // 2. single table: "tab1" / "tab2:cf1" / "tab3:cf1,cf3" - tabCFsMap = new TreeMap>(); - tabCFsMap.put(TableName.valueOf("tab1"), null); // its table name is "tab1" - assertEquals("tab1", ReplicationAdmin.getTableCfsStr(tabCFsMap)); - - tabCFsMap = new TreeMap>(); - tabCFsMap.put(TableName.valueOf("tab1"), Lists.newArrayList("cf1")); - assertEquals("tab1:cf1", ReplicationAdmin.getTableCfsStr(tabCFsMap)); - - tabCFsMap = new TreeMap>(); - tabCFsMap.put(TableName.valueOf("tab1"), Lists.newArrayList("cf1", "cf3")); - assertEquals("tab1:cf1,cf3", ReplicationAdmin.getTableCfsStr(tabCFsMap)); - - // 3.
multiple tables: "tab1 ; tab2:cf1 ; tab3:cf1,cf3" - tabCFsMap = new TreeMap>(); - tabCFsMap.put(TableName.valueOf("tab1"), null); - tabCFsMap.put(TableName.valueOf("tab2"), Lists.newArrayList("cf1")); - tabCFsMap.put(TableName.valueOf("tab3"), Lists.newArrayList("cf1", "cf3")); - assertEquals("tab1;tab2:cf1;tab3:cf1,cf3", ReplicationAdmin.getTableCfsStr(tabCFsMap)); - } - - @Test public void testAppendPeerTableCFs() throws Exception { // Add a valid peer admin.addPeer(ID_ONE, KEY_ONE); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java index 29a052b..d94d9a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java @@ -19,13 +19,9 @@ */ package org.apache.hadoop.hbase.replication; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -47,7 +43,10 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; +import org.apache.hadoop.hbase.client.replication.TableCFsHelper; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto; import org.apache.hadoop.hbase.testclassification.FlakeyTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -58,6 +57,8 @@ import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; +import static org.junit.Assert.*; + @Category({FlakeyTests.class, LargeTests.class}) public class TestPerTableCFReplication { @@ -184,13 +185,13 @@ public class TestPerTableCFReplication { Map> tabCFsMap = null; // 1. null or empty string, result should be null - tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig(null); + tabCFsMap = TableCFsHelper.parseTableCFsFromConfig(null); assertEquals(null, tabCFsMap); - tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig(""); + tabCFsMap = TableCFsHelper.parseTableCFsFromConfig(""); assertEquals(null, tabCFsMap); - tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig(" "); + tabCFsMap = TableCFsHelper.parseTableCFsFromConfig(" "); assertEquals(null, tabCFsMap); TableName tab1 = TableName.valueOf("tab1"); @@ -198,20 +199,20 @@ public class TestPerTableCFReplication { TableName tab3 = TableName.valueOf("tab3"); // 2. 
single table: "tab1" / "tab2:cf1" / "tab3:cf1,cf3" - tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab1"); + tabCFsMap = TableCFsHelper.parseTableCFsFromConfig("tab1"); assertEquals(1, tabCFsMap.size()); // only one table assertTrue(tabCFsMap.containsKey(tab1)); // its table name is "tab1" assertFalse(tabCFsMap.containsKey(tab2)); // not other table assertEquals(null, tabCFsMap.get(tab1)); // null cf-list, - tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab2:cf1"); + tabCFsMap = TableCFsHelper.parseTableCFsFromConfig("tab2:cf1"); assertEquals(1, tabCFsMap.size()); // only one table assertTrue(tabCFsMap.containsKey(tab2)); // its table name is "tab2" assertFalse(tabCFsMap.containsKey(tab1)); // not other table assertEquals(1, tabCFsMap.get(tab2).size()); // cf-list contains only 1 cf assertEquals("cf1", tabCFsMap.get(tab2).get(0));// the only cf is "cf1" - tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab3 : cf1 , cf3"); + tabCFsMap = TableCFsHelper.parseTableCFsFromConfig("tab3 : cf1 , cf3"); assertEquals(1, tabCFsMap.size()); // only one table assertTrue(tabCFsMap.containsKey(tab3)); // its table name is "tab2" assertFalse(tabCFsMap.containsKey(tab1)); // not other table @@ -220,7 +221,7 @@ public class TestPerTableCFReplication { assertTrue(tabCFsMap.get(tab3).contains("cf3"));// contains "cf3" // 3. multiple tables: "tab1 ; tab2:cf1 ; tab3:cf1,cf3" - tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig("tab1 ; tab2:cf1 ; tab3:cf1,cf3"); + tabCFsMap = TableCFsHelper.parseTableCFsFromConfig("tab1 ; tab2:cf1 ; tab3:cf1,cf3"); // 3.1 contains 3 tables : "tab1", "tab2" and "tab3" assertEquals(3, tabCFsMap.size()); assertTrue(tabCFsMap.containsKey(tab1)); @@ -238,7 +239,7 @@ public class TestPerTableCFReplication { // 4. contiguous or additional ";"(table delimiter) or ","(cf delimiter) can be tolerated // still use the example of multiple tables: "tab1 ; tab2:cf1 ; tab3:cf1,cf3" - tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig( + tabCFsMap = TableCFsHelper.parseTableCFsFromConfig( "tab1 ; ; tab2:cf1 ; tab3:cf1,,cf3 ;"); // 4.1 contains 3 tables : "tab1", "tab2" and "tab3" assertEquals(3, tabCFsMap.size()); @@ -257,7 +258,7 @@ public class TestPerTableCFReplication { // 5. invalid format "tab1:tt:cf1 ; tab2::cf1 ; tab3:cf1,cf3" // "tab1:tt:cf1" and "tab2::cf1" are invalid and will be ignored totally - tabCFsMap = ReplicationAdmin.parseTableCFsFromConfig( + tabCFsMap = TableCFsHelper.parseTableCFsFromConfig( "tab1:tt:cf1 ; tab2::cf1 ; tab3:cf1,cf3"); // 5.1 no "tab1" and "tab2", only "tab3" assertEquals(1, tabCFsMap.size()); // only one table @@ -270,6 +271,115 @@ public class TestPerTableCFReplication { assertTrue(tabCFsMap.get(tab3).contains("cf3")); } + @Test + public void testTableCFsHelperConverter() { + + TableCFsProto.TableCFs tableCFs = null; + Map> tabCFsMap = null; + + // 1. null or empty string, result should be null + assertNull(TableCFsHelper.convert(tabCFsMap)); + + tabCFsMap = new HashMap>(); + tableCFs = TableCFsHelper.convert(tabCFsMap); + assertEquals(0, tableCFs.getTableCfsCount()); + + TableName tab1 = TableName.valueOf("tab1"); + TableName tab2 = TableName.valueOf("tab2"); + TableName tab3 = TableName.valueOf("tab3"); + + // 2. 
single table: "tab1" / "tab2:cf1" / "tab3:cf1,cf3" + tabCFsMap.clear(); + tabCFsMap.put(tab1, null); + tableCFs = TableCFsHelper.convert(tabCFsMap); + assertEquals(1, tableCFs.getTableCfsCount()); // only one table + assertEquals(tab1.toString(), + tableCFs.getTableCfs(0).getTableName().getQualifier().toStringUtf8()); + assertEquals(0, tableCFs.getTableCfs(0).getFamiliesCount()); + + tabCFsMap.clear(); + tabCFsMap.put(tab2, new ArrayList()); + tabCFsMap.get(tab2).add("cf1"); + tableCFs = TableCFsHelper.convert(tabCFsMap); + assertEquals(1, tableCFs.getTableCfsCount()); // only one table + assertEquals(tab2.toString(), + tableCFs.getTableCfs(0).getTableName().getQualifier().toStringUtf8()); + assertEquals(1, tableCFs.getTableCfs(0).getFamiliesCount()); + assertEquals("cf1", tableCFs.getTableCfs(0).getFamilies(0).toStringUtf8()); + + tabCFsMap.clear(); + tabCFsMap.put(tab3, new ArrayList()); + tabCFsMap.get(tab3).add("cf1"); + tabCFsMap.get(tab3).add("cf3"); + tableCFs = TableCFsHelper.convert(tabCFsMap); + assertEquals(1, tableCFs.getTableCfsCount()); + assertEquals(tab3.toString(), + tableCFs.getTableCfs(0).getTableName().getQualifier().toStringUtf8()); + assertEquals(2, tableCFs.getTableCfs(0).getFamiliesCount()); + assertEquals("cf1", tableCFs.getTableCfs(0).getFamilies(0).toStringUtf8()); + assertEquals("cf3", tableCFs.getTableCfs(0).getFamilies(1).toStringUtf8()); + + tabCFsMap.clear(); + tabCFsMap.put(tab1, null); + tabCFsMap.put(tab2, new ArrayList()); + tabCFsMap.get(tab2).add("cf1"); + tabCFsMap.put(tab3, new ArrayList()); + tabCFsMap.get(tab3).add("cf1"); + tabCFsMap.get(tab3).add("cf3"); + + tableCFs = TableCFsHelper.convert(tabCFsMap); + assertEquals(3, tableCFs.getTableCfsCount()); + assertNotNull(TableCFsHelper.getTableCF(tableCFs, tab1.toString())); + assertNotNull(TableCFsHelper.getTableCF(tableCFs, tab2.toString())); + assertNotNull(TableCFsHelper.getTableCF(tableCFs, tab3.toString())); + + assertEquals(0, + TableCFsHelper.getTableCF(tableCFs, tab1.toString()).getFamiliesCount()); + + assertEquals(1, + TableCFsHelper.getTableCF(tableCFs, tab2.toString()).getFamiliesCount()); + assertEquals("cf1", + TableCFsHelper.getTableCF(tableCFs, tab2.toString()).getFamilies(0).toStringUtf8()); + + assertEquals(2, + TableCFsHelper.getTableCF(tableCFs, tab3.toString()).getFamiliesCount()); + assertEquals("cf1", + TableCFsHelper.getTableCF(tableCFs, tab3.toString()).getFamilies(0).toStringUtf8()); + assertEquals("cf3", + TableCFsHelper.getTableCF(tableCFs, tab3.toString()).getFamilies(1).toStringUtf8()); + } + + @Test + public void testOldFormatTableCFsReading() throws IOException { + TableName tab1 = TableName.valueOf("tab1"); + TableName tab2 = TableName.valueOf("tab2"); + TableName tab3 = TableName.valueOf("tab3"); + + byte[] bytes = Bytes.toBytes("tab1 ; tab2:cf1 ; tab3:cf1,cf3"); + TableCFsProto.TableCFs tableCFs = TableCFsHelper.parseTableCFs(bytes); + assertEquals(3, tableCFs.getTableCfsCount()); + assertNotNull(TableCFsHelper.getTableCF(tableCFs, tab1.toString())); + assertNotNull(TableCFsHelper.getTableCF(tableCFs, tab2.toString())); + assertNotNull(TableCFsHelper.getTableCF(tableCFs, tab3.toString())); + assertEquals(0, + TableCFsHelper.getTableCF(tableCFs, tab1.toString()).getFamiliesCount()); + assertEquals(1, + TableCFsHelper.getTableCF(tableCFs, tab2.toString()).getFamiliesCount()); + assertEquals("cf1", + TableCFsHelper.getTableCF(tableCFs, tab2.toString()).getFamilies(0).toStringUtf8()); + assertEquals(2, + TableCFsHelper.getTableCF(tableCFs, 
tab3.toString()).getFamiliesCount()); + assertEquals("cf1", + TableCFsHelper.getTableCF(tableCFs, tab3.toString()).getFamilies(0).toStringUtf8()); + assertEquals("cf3", + TableCFsHelper.getTableCF(tableCFs, tab3.toString()).getFamilies(1).toStringUtf8()); + + assertFalse(ProtobufUtil.isPBMagicPrefix(bytes)); + bytes = TableCFsHelper.toByteArray(tableCFs); + assertTrue(ProtobufUtil.isPBMagicPrefix(bytes)); + } + + @Test(timeout=300000) public void testPerTableCFReplication() throws Exception { LOG.info("testPerTableCFReplication"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java index 22c421d..b394482 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.replication.TableCFsHelper; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.TableName; @@ -208,20 +209,20 @@ public class TestReplicationWALEntryFilters { when(peer.getTableCFs()).thenReturn(null); Entry userEntry = createEntry(a, b, c); TableCfWALEntryFilter filter = new TableCfWALEntryFilter(peer); - assertEquals(createEntry(a,b,c), filter.filter(userEntry)); + assertEquals(null, filter.filter(userEntry)); // empty map userEntry = createEntry(a, b, c); Map> tableCfs = new HashMap>(); - when(peer.getTableCFs()).thenReturn(tableCfs); + when(peer.getTableCFs()).thenReturn(TableCFsHelper.convert(tableCfs)); filter = new TableCfWALEntryFilter(peer); - assertEquals(null, filter.filter(userEntry)); + assertEquals(createEntry(a,b,c), filter.filter(userEntry)); // table bar userEntry = createEntry(a, b, c); tableCfs = new HashMap>(); tableCfs.put(TableName.valueOf("bar"), null); - when(peer.getTableCFs()).thenReturn(tableCfs); + when(peer.getTableCFs()).thenReturn(TableCFsHelper.convert(tableCfs)); filter = new TableCfWALEntryFilter(peer); assertEquals(null, filter.filter(userEntry)); @@ -229,7 +230,7 @@ public class TestReplicationWALEntryFilters { userEntry = createEntry(a, b, c); tableCfs = new HashMap>(); tableCfs.put(TableName.valueOf("foo"), Lists.newArrayList("a")); - when(peer.getTableCFs()).thenReturn(tableCfs); + when(peer.getTableCFs()).thenReturn(TableCFsHelper.convert(tableCfs)); filter = new TableCfWALEntryFilter(peer); assertEquals(createEntry(a), filter.filter(userEntry)); @@ -237,7 +238,7 @@ public class TestReplicationWALEntryFilters { userEntry = createEntry(a, b, c, d); tableCfs = new HashMap>(); tableCfs.put(TableName.valueOf("foo"), Lists.newArrayList("a", "c")); - when(peer.getTableCFs()).thenReturn(tableCfs); + when(peer.getTableCFs()).thenReturn(TableCFsHelper.convert(tableCfs)); filter = new TableCfWALEntryFilter(peer); assertEquals(createEntry(a,c), filter.filter(userEntry)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java index 
a870ed8..7653c9f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; +import org.apache.hadoop.hbase.protobuf.generated.TableCFsProto; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster; @@ -281,7 +282,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster { when(context.getMetrics()).thenReturn(mock(MetricsSource.class)); ReplicationPeer mockPeer = mock(ReplicationPeer.class); - when(mockPeer.getTableCFs()).thenReturn(null); + when(mockPeer.getTableCFs()).thenReturn(TableCFsProto.TableCFs.newBuilder().build()); when(context.getReplicationPeer()).thenReturn(mockPeer); replicator.init(context); -- 1.9.3 (Apple Git-50)