From 91a0a5702ecf8dff43aa2f20dec0df23de9dbdd4 Mon Sep 17 00:00:00 2001 From: rahulgidwani Date: Tue, 6 Jan 2015 17:12:16 -0800 Subject: [PATCH] Add a per-peer replication RPC protocol (NATIVE/THRIFT): Thrift replication endpoint and client, plus an optional Thrift server in the replication sink --- .../hbase/client/replication/ReplicationAdmin.java | 11 + .../hadoop/hbase/replication/ReplicationPeer.java | 45 + .../hbase/replication/ReplicationPeerConfig.java | 18 +- .../hbase/replication/ReplicationPeersZKImpl.java | 3 +- .../hbase/protobuf/generated/ZooKeeperProtos.java | 219 +- hbase-protocol/src/main/protobuf/ZooKeeper.proto | 5 + hbase-server/pom.xml | 11 + .../HBaseInterClusterReplicationEndpoint.java | 13 +- .../replication/regionserver/Replication.java | 8 +- .../replication/regionserver/ReplicationSink.java | 59 +- .../regionserver/ReplicationSinkManager.java | 2 +- .../thrift/ChainedTTransportFactory.java | 40 + .../hbase/replication/thrift/TFilterTransport.java | 99 + .../replication/thrift/TUGIAssumingTransport.java | 63 + .../thrift/TUGIAssumingTransportFactory.java | 55 + .../hbase/replication/thrift/ThriftAdaptors.java | 158 ++ .../hbase/replication/thrift/ThriftClient.java | 187 ++ .../hbase/replication/thrift/ThriftEditType.java | 75 + .../thrift/ThriftHBaseReplicationEndpoint.java | 61 + .../thrift/ThriftHBaseServiceHandler.java | 102 + .../hbase/replication/thrift/ThriftMetrics.java | 83 + .../hbase/replication/thrift/ThriftServer.java | 173 ++ .../hbase/replication/thrift/ThriftUtilities.java | 71 + .../replication/thrift/generated/TBatchEdit.java | 435 ++++ .../replication/thrift/generated/TClusterId.java | 480 ++++ .../replication/thrift/generated/TColumnValue.java | 910 ++++++++ .../hbase/replication/thrift/generated/TEdit.java | 637 ++++++ .../thrift/generated/THBaseService.java | 2310 ++++++++++++++++++++ .../replication/thrift/generated/THLogKey.java | 582 +++++ .../replication/thrift/generated/TIOError.java | 391 ++++ .../thrift/generated/TIllegalArgument.java | 390 ++++ .../replication/thrift/generated/TWalLEdit.java | 439 ++++ .../org/apache/hadoop/hbase/thrift/hbase.thrift | 97 + .../client/replication/TestReplicationAdmin.java | 9 + .../regionserver/TestReplicationSink.java | 6 +- .../replication/thrift/ReplicationTestUtils.java | 201 ++ .../thrift/TestThriftCyclicReplication.java | 126 ++ .../thrift/TestThriftMasterMasterReplication.java | 104 + .../thrift/TestThriftMultiSlaveReplication.java | 114 + .../thrift/TestThriftReplicationBase.java | 34 + .../thrift/TestThriftReplicationSink.java | 265 +++ .../replication/thrift/ThriftAdaptorsTest.java | 98 + .../src/main/ruby/hbase/replication_admin.rb | 4 +- .../src/main/ruby/shell/commands/add_peer.rb | 6 +- pom.xml | 6 +- 45 files changed, 9159 insertions(+), 46 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ChainedTTransportFactory.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/TFilterTransport.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/TUGIAssumingTransport.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/TUGIAssumingTransportFactory.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftAdaptors.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftClient.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftEditType.java create mode 100644 
hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftHBaseReplicationEndpoint.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftHBaseServiceHandler.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftMetrics.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftServer.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftUtilities.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TBatchEdit.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TClusterId.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TColumnValue.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TEdit.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/THBaseService.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/THLogKey.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TIOError.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TIllegalArgument.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TWalLEdit.java create mode 100644 hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift/hbase.thrift create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/ReplicationTestUtils.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftCyclicReplication.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftMasterMasterReplication.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftMultiSlaveReplication.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftReplicationBase.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftReplicationSink.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/ThriftAdaptorsTest.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java index 6e5eb2e..8110d84 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeers; +import org.apache.hadoop.hbase.replication.ReplicationPeer; import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -155,6 +156,16 @@ public class ReplicationAdmin implements Closeable { new ReplicationPeerConfig().setClusterKey(clusterKey), tableCFs); } 
+ @Deprecated + public void addPeer(String id, String clusterKey, String tableCFs, String protocol) + throws ReplicationException { + ReplicationPeerConfig config = new ReplicationPeerConfig().setClusterKey(clusterKey); + if (StringUtils.isNotBlank(protocol)) { + config = config.setProtocol(ReplicationPeer.PeerProtocol.valueOf(protocol)); + } + this.replicationPeers.addPeer(id, config, tableCFs); + } + /** * Add a new remote slave cluster for replication. * @param id a short name that identifies the cluster diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java index 9a95394..979d18a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java @@ -17,12 +17,17 @@ */ package org.apache.hadoop.hbase.replication; +import java.util.Arrays; import java.util.List; import java.util.Map; +import com.google.common.base.Function; +import com.google.common.collect.Maps; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; + /** * ReplicationPeer manages enabled / disabled state for the peer. @@ -39,6 +44,46 @@ public interface ReplicationPeer { } /** + * RPC protocol of the peer: Thrift or native. + * The endpoint implementations have to be referenced by class-name strings because the classes live in the hbase-server module. + */ + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) + enum PeerProtocol { + NATIVE(ZooKeeperProtos.ReplicationPeer.Protocol.NATIVE, + "org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint"), + THRIFT(ZooKeeperProtos.ReplicationPeer.Protocol.THRIFT, + "org.apache.hadoop.hbase.replication.thrift.ThriftHBaseReplicationEndpoint"); + + private static final Map<ZooKeeperProtos.ReplicationPeer.Protocol, PeerProtocol> map = + Maps.uniqueIndex(Arrays.asList(PeerProtocol.values()), + new Function<PeerProtocol, ZooKeeperProtos.ReplicationPeer.Protocol>() { + @Override public ZooKeeperProtos.ReplicationPeer.Protocol apply(PeerProtocol input) { + return input.getProtocol(); + } + }); + + private final ZooKeeperProtos.ReplicationPeer.Protocol protocol; + private final String replicationEndpointImpl; + + PeerProtocol(ZooKeeperProtos.ReplicationPeer.Protocol protocol, String replicationEndpointImpl) { + this.protocol = protocol; + this.replicationEndpointImpl = replicationEndpointImpl; + } + + public ZooKeeperProtos.ReplicationPeer.Protocol getProtocol() { + return protocol; + } + + public static PeerProtocol fromProtobuf(ZooKeeperProtos.ReplicationPeer.Protocol protocol) { + return map.get(protocol); + } + + public String getReplicationEndpointImpl() { + return replicationEndpointImpl; + } + } + + /** * Get the identifier of this peer * * @return string representation of the id diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java index 8b8bab7..f2a8a89 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java @@ -35,6 +35,7 @@ public class ReplicationPeerConfig { private String clusterKey; private String replicationEndpointImpl; + private ReplicationPeer.PeerProtocol protocol = 
ReplicationPeer.PeerProtocol.NATIVE; private final Map<byte[], byte[]> peerData; private final Map<String, String> configuration; @@ -62,6 +63,16 @@ public class ReplicationPeerConfig { return this; } + /** + * Sets the replication RPC protocol for this peer. + * @param protocol [NATIVE, THRIFT] + */ + public ReplicationPeerConfig setProtocol(ReplicationPeer.PeerProtocol protocol) { + this.protocol = protocol; + setReplicationEndpointImpl(protocol.getReplicationEndpointImpl()); + return this; + } + public String getClusterKey() { return clusterKey; } @@ -78,10 +89,15 @@ public class ReplicationPeerConfig { return configuration; } + public ReplicationPeer.PeerProtocol getProtocol() { + return protocol; + } + @Override public String toString() { StringBuilder builder = new StringBuilder("clusterKey=").append(clusterKey).append(","); - builder.append("replicationEndpointImpl=").append(replicationEndpointImpl); + builder.append("replicationEndpointImpl=").append(replicationEndpointImpl).append(","); + builder.append("rpcProtocol=").append(protocol.name()); return builder.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java index 6631509..4aa6d62 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java @@ -508,6 +508,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re for (NameStringPair pair : peer.getConfigurationList()) { peerConfig.getConfiguration().put(pair.getName(), pair.getValue()); } + peerConfig.setProtocol(ReplicationPeer.PeerProtocol.fromProtobuf(peer.getPeerProtocol())); return peerConfig; } @@ -533,7 +534,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re .setValue(entry.getValue()) .build()); } - + builder.setPeerProtocol(peerConfig.getProtocol().getProtocol()); return builder.build(); } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index 4989f6e..b5858ec 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -5024,6 +5024,16 @@ public final class ZooKeeperProtos { */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getConfigurationOrBuilder( int index); + + // optional .ReplicationPeer.Protocol peerProtocol = 5 [default = NATIVE]; + /** + * optional .ReplicationPeer.Protocol peerProtocol = 5 [default = NATIVE]; + */ + boolean hasPeerProtocol(); + /** + * optional .ReplicationPeer.Protocol peerProtocol = 5 [default = NATIVE]; + */ + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Protocol getPeerProtocol(); } /** * Protobuf type {@code ReplicationPeer} */ @@ -5107,6 +5117,17 @@ public final class ZooKeeperProtos { configuration_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.PARSER, extensionRegistry)); break; } + case 40: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Protocol value = 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Protocol.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(5, rawValue); + } else { + bitField0_ |= 0x00000004; + peerProtocol_ = value; + } + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -5152,6 +5173,88 @@ public final class ZooKeeperProtos { return PARSER; } + /** + * Protobuf enum {@code ReplicationPeer.Protocol} + */ + public enum Protocol + implements com.google.protobuf.ProtocolMessageEnum { + /** + * NATIVE = 0; + */ + NATIVE(0, 0), + /** + * THRIFT = 1; + */ + THRIFT(1, 1), + ; + + /** + * NATIVE = 0; + */ + public static final int NATIVE_VALUE = 0; + /** + * THRIFT = 1; + */ + public static final int THRIFT_VALUE = 1; + + + public final int getNumber() { return value; } + + public static Protocol valueOf(int value) { + switch (value) { + case 0: return NATIVE; + case 1: return THRIFT; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap<Protocol> + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap<Protocol> + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap<Protocol>() { + public Protocol findValueByNumber(int number) { + return Protocol.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDescriptor().getEnumTypes().get(0); + } + + private static final Protocol[] VALUES = values(); + + public static Protocol valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private Protocol(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:ReplicationPeer.Protocol) + } + private int bitField0_; // required string clusterkey = 1; public static final int CLUSTERKEY_FIELD_NUMBER = 1; @@ -5326,11 +5429,28 @@ public final class ZooKeeperProtos { return configuration_.get(index); } + // optional .ReplicationPeer.Protocol peerProtocol = 5 [default = NATIVE]; + public static final int PEERPROTOCOL_FIELD_NUMBER = 5; + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Protocol peerProtocol_; + /** + * optional .ReplicationPeer.Protocol peerProtocol = 5 [default = NATIVE]; + */ + public boolean hasPeerProtocol() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .ReplicationPeer.Protocol peerProtocol = 5 [default = NATIVE]; + */ + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Protocol getPeerProtocol() { + return peerProtocol_; + } + private void initFields() { clusterkey_ = ""; replicationEndpointImpl_ = ""; data_ = java.util.Collections.emptyList(); configuration_ = java.util.Collections.emptyList(); + peerProtocol_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Protocol.NATIVE; } private byte memoizedIsInitialized = -1; public 
final boolean isInitialized() { @@ -5372,6 +5492,9 @@ public final class ZooKeeperProtos { for (int i = 0; i < configuration_.size(); i++) { output.writeMessage(4, configuration_.get(i)); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeEnum(5, peerProtocol_.getNumber()); + } getUnknownFields().writeTo(output); } @@ -5397,6 +5520,10 @@ public final class ZooKeeperProtos { size += com.google.protobuf.CodedOutputStream .computeMessageSize(4, configuration_.get(i)); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(5, peerProtocol_.getNumber()); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -5434,6 +5561,11 @@ public final class ZooKeeperProtos { .equals(other.getDataList()); result = result && getConfigurationList() .equals(other.getConfigurationList()); + result = result && (hasPeerProtocol() == other.hasPeerProtocol()); + if (hasPeerProtocol()) { + result = result && + (getPeerProtocol() == other.getPeerProtocol()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -5463,6 +5595,10 @@ public final class ZooKeeperProtos { hash = (37 * hash) + CONFIGURATION_FIELD_NUMBER; hash = (53 * hash) + getConfigurationList().hashCode(); } + if (hasPeerProtocol()) { + hash = (37 * hash) + PEERPROTOCOL_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getPeerProtocol()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -5595,6 +5731,8 @@ public final class ZooKeeperProtos { } else { configurationBuilder_.clear(); } + peerProtocol_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Protocol.NATIVE; + bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -5649,6 +5787,10 @@ public final class ZooKeeperProtos { } else { result.configuration_ = configurationBuilder_.build(); } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000004; + } + result.peerProtocol_ = peerProtocol_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -5727,6 +5869,9 @@ public final class ZooKeeperProtos { } } } + if (other.hasPeerProtocol()) { + setPeerProtocol(other.getPeerProtocol()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -6428,6 +6573,42 @@ public final class ZooKeeperProtos { return configurationBuilder_; } + // optional .ReplicationPeer.Protocol peerProtocol = 5 [default = NATIVE]; + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Protocol peerProtocol_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Protocol.NATIVE; + /** + * optional .ReplicationPeer.Protocol peerProtocol = 5 [default = NATIVE]; + */ + public boolean hasPeerProtocol() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .ReplicationPeer.Protocol peerProtocol = 5 [default = NATIVE]; + */ + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Protocol getPeerProtocol() { + return peerProtocol_; + } + /** + * optional .ReplicationPeer.Protocol peerProtocol = 5 [default = NATIVE]; + */ + public Builder setPeerProtocol(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Protocol value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + peerProtocol_ = value; + onChanged(); + return this; + } + /** + * optional .ReplicationPeer.Protocol peerProtocol = 5 
[default = NATIVE]; + */ + public Builder clearPeerProtocol() { + bitField0_ = (bitField0_ & ~0x00000010); + peerProtocol_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Protocol.NATIVE; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:ReplicationPeer) } @@ -10656,25 +10837,27 @@ public final class ZooKeeperProtos { "G\020\001\022\016\n\nLOG_REPLAY\020\002\"n\n\005Table\022$\n\005state\030\001 " + "\002(\0162\014.Table.State:\007ENABLED\"?\n\005State\022\013\n\007E" + "NABLED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n" + - "\010ENABLING\020\003\"\215\001\n\017ReplicationPeer\022\022\n\nclust", + "\010ENABLING\020\003\"\352\001\n\017ReplicationPeer\022\022\n\nclust", "erkey\030\001 \002(\t\022\037\n\027replicationEndpointImpl\030\002" + " \001(\t\022\035\n\004data\030\003 \003(\0132\017.BytesBytesPair\022&\n\rc" + - "onfiguration\030\004 \003(\0132\017.NameStringPair\"^\n\020R" + - "eplicationState\022&\n\005state\030\001 \002(\0162\027.Replica" + - "tionState.State\"\"\n\005State\022\013\n\007ENABLED\020\000\022\014\n" + - "\010DISABLED\020\001\"+\n\027ReplicationHLogPosition\022\020" + - "\n\010position\030\001 \002(\003\"%\n\017ReplicationLock\022\022\n\nl" + - "ock_owner\030\001 \002(\t\"\230\001\n\tTableLock\022\036\n\ntable_n" + - "ame\030\001 \001(\0132\n.TableName\022\037\n\nlock_owner\030\002 \001(" + - "\0132\013.ServerName\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_", - "shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_" + - "time\030\006 \001(\003\";\n\017StoreSequenceId\022\023\n\013family_" + - "name\030\001 \002(\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026Regio" + - "nStoreSequenceIds\022 \n\030last_flushed_sequen" + - "ce_id\030\001 \002(\004\022+\n\021store_sequence_id\030\002 \003(\0132\020" + - ".StoreSequenceIdBE\n*org.apache.hadoop.hb" + - "ase.protobuf.generatedB\017ZooKeeperProtosH" + - "\001\210\001\001\240\001\001" + "onfiguration\030\004 \003(\0132\017.NameStringPair\0227\n\014p" + + "eerProtocol\030\005 \001(\0162\031.ReplicationPeer.Prot" + + "ocol:\006NATIVE\"\"\n\010Protocol\022\n\n\006NATIVE\020\000\022\n\n\006" + + "THRIFT\020\001\"^\n\020ReplicationState\022&\n\005state\030\001 " + + "\002(\0162\027.ReplicationState.State\"\"\n\005State\022\013\n" + + "\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"+\n\027ReplicationH" + + "LogPosition\022\020\n\010position\030\001 \002(\003\"%\n\017Replica" + + "tionLock\022\022\n\nlock_owner\030\001 \002(\t\"\230\001\n\tTableLo", + "ck\022\036\n\ntable_name\030\001 \001(\0132\n.TableName\022\037\n\nlo" + + "ck_owner\030\002 \001(\0132\013.ServerName\022\021\n\tthread_id" + + "\030\003 \001(\003\022\021\n\tis_shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001" + + "(\t\022\023\n\013create_time\030\006 \001(\003\";\n\017StoreSequence" + + "Id\022\023\n\013family_name\030\001 \002(\014\022\023\n\013sequence_id\030\002" + + " \002(\004\"g\n\026RegionStoreSequenceIds\022 \n\030last_f" + + "lushed_sequence_id\030\001 \002(\004\022+\n\021store_sequen" + + "ce_id\030\002 \003(\0132\020.StoreSequenceIdBE\n*org.apa" + + "che.hadoop.hbase.protobuf.generatedB\017Zoo" + + "KeeperProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -10722,7 +10905,7 @@ public final class ZooKeeperProtos { 
internal_static_ReplicationPeer_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicationPeer_descriptor, - new java.lang.String[] { "Clusterkey", "ReplicationEndpointImpl", "Data", "Configuration", }); + new java.lang.String[] { "Clusterkey", "ReplicationEndpointImpl", "Data", "Configuration", "PeerProtocol", }); internal_static_ReplicationState_descriptor = getDescriptor().getMessageTypes().get(7); internal_static_ReplicationState_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto index ce11162..5fa4374 100644 --- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto +++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto @@ -119,12 +119,17 @@ message Table { * Used by replication. Holds a replication peer key. */ message ReplicationPeer { + enum Protocol { + NATIVE = 0; + THRIFT = 1; + } // clusterkey is the concatenation of the slave cluster's // hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent required string clusterkey = 1; optional string replicationEndpointImpl = 2; repeated BytesBytesPair data = 3; repeated NameStringPair configuration = 4; + optional Protocol peerProtocol = 5 [default = NATIVE]; } /** diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index 3c1e436..cdaeaee 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -341,6 +341,17 @@ <groupId>com.google.guava</groupId> <artifactId>guava</artifactId> </dependency> + <dependency> + <groupId>org.apache.thrift</groupId> + <artifactId>libthrift</artifactId> + <version>${thrift.version}</version> + <exclusions> + <exclusion> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-simple</artifactId> + </exclusion> + </exclusions> + </dependency> <dependency> <groupId>commons-cli</groupId> <artifactId>commons-cli</artifactId> diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index e4ec0bc..b179d93 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; @@ -68,7 +67,7 @@ public class HBaseInterClusterReplicationEndpoi //Metrics for this source private MetricsSource metrics; // Handles connecting to peer region servers - private ReplicationSinkManager replicationSinkMgr; + protected ReplicationSinkManager replicationSinkMgr; private boolean peersSelected = false; @Override @@ -153,13 +152,11 @@ public class HBaseInterClusterReplicationEndpoi SinkPeer sinkPeer = null; try { sinkPeer = replicationSinkMgr.getReplicationSink(); - BlockingInterface rrs = sinkPeer.getRegionServer(); if (LOG.isTraceEnabled()) { LOG.trace("Replicating " + entries.size() + " entries of total size " + replicateContext.getSize()); } - ReplicationProtbufUtil.replicateWALEntry(rrs, - entries.toArray(new HLog.Entry[entries.size()])); + replicateWALEntry(entries, sinkPeer); // update metrics 
this.metrics.setAgeOfLastShippedOp(entries.get(entries.size()-1).getKey().getWriteTime()); @@ -205,6 +202,12 @@ public class HBaseInterClusterReplicationEndpoi return false; // in case we exited before replicating } + protected void replicateWALEntry(List<HLog.Entry> entries, + SinkPeer sinkPeer) throws IOException { + ReplicationProtbufUtil.replicateWALEntry(sinkPeer.getRegionServer(), + entries.toArray(new HLog.Entry[entries.size()])); + } + protected boolean isPeerEnabled() { return ctx.getReplicationPeer().getPeerState() == PeerState.ENABLED; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index 15ccffe..0a3a5b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -201,10 +201,16 @@ public class Replication implements WALActionsListener, if (this.replication) { try { this.replicationManager.init(); + this.replicationSink = new ReplicationSink( + this.conf, + this.server, + ZKClusterId.getUUIDForCluster(this.server.getZooKeeper()).toString() + ); + } catch (KeeperException e) { + throw new IOException(e); } catch (ReplicationException e) { throw new IOException(e); } - this.replicationSink = new ReplicationSink(this.conf, this.server); this.scheduleThreadPool.scheduleAtFixedRate( new ReplicationStatisticsThread(this.replicationSink, this.replicationManager), statsThreadPeriod, statsThreadPeriod, TimeUnit.SECONDS); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java index 746f1fd..455808c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java @@ -33,16 +33,9 @@ import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; @@ -50,8 +43,18 @@ import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Row; +import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.regionserver.wal.HLog; +import 
org.apache.hadoop.hbase.replication.thrift.ThriftAdaptors; +import org.apache.hadoop.hbase.replication.thrift.ThriftServer; +import org.apache.hadoop.hbase.replication.thrift.generated.TBatchEdit; +import org.apache.hadoop.hbase.replication.thrift.generated.THBaseService; +import org.apache.hadoop.hbase.replication.thrift.generated.TIOError; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.thrift.TException; /** * This class is responsible for replicating the edits coming @@ -68,13 +71,17 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; * TODO make this class more like ReplicationSource wrt log handling */ @InterfaceAudience.Private -public class ReplicationSink { +public class ReplicationSink implements THBaseService.Iface { private static final Log LOG = LogFactory.getLog(ReplicationSink.class); + public static String CONF_KEY_REPLICATION_THRIFT = "hbase.replication.sink.enable.thrift"; + private final Configuration conf; private final HConnection sharedHtableCon; private final MetricsSink metrics; private final AtomicLong totalReplicatedEdits = new AtomicLong(); + private final String clusterId; + private ThriftServer thriftServer; /** * Create a sink for replication @@ -83,8 +90,9 @@ public class ReplicationSink { * @param stopper boolean to tell this thread to stop * @throws IOException thrown when HDFS goes bad or bad file name */ - public ReplicationSink(Configuration conf, Stoppable stopper) + public ReplicationSink(Configuration conf, Stoppable stopper, String clusterId) throws IOException { + this.clusterId = clusterId; this.conf = HBaseConfiguration.create(conf); decorateConf(); this.metrics = new MetricsSink(); @@ -95,7 +103,7 @@ public class ReplicationSink { * decorate the Configuration object to make replication more receptive to delays: * lessen the timeout and numTries. */ - private void decorateConf() { + private void decorateConf() throws IOException { this.conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, this.conf.getInt("replication.sink.client.retries.number", 4)); this.conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, @@ -104,6 +112,11 @@ public class ReplicationSink { if (StringUtils.isNotEmpty(replicationCodec)) { this.conf.set(HConstants.RPC_CODEC_CONF_KEY, replicationCodec); } + if(conf.getBoolean(CONF_KEY_REPLICATION_THRIFT, false)) { + LOG.info("Starting up thrift server for replication"); + thriftServer = new ThriftServer(conf, this); + thriftServer.start(); + } } /** @@ -213,6 +226,9 @@ public class ReplicationSink { */ public void stopReplicationSinkServices() { try { + if (thriftServer != null) { + thriftServer.shutdown(); + } this.sharedHtableCon.close(); } catch (IOException e) { LOG.warn("IOException while closing the connection", e); // ignoring as we are closing. 
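For anyone trying the sink side out: it is driven entirely by the flag introduced above. A minimal sketch of enabling the Thrift sink in a test harness, using only what this diff shows; the stopper and ZooKeeper watcher are assumed to exist in the surrounding test, imports are omitted, and port selection is delegated to ThriftUtilities, which lies outside this excerpt:

    Configuration conf = HBaseConfiguration.create();
    // Off by default; when set, ReplicationSink.decorateConf() starts an embedded ThriftServer.
    conf.setBoolean(ReplicationSink.CONF_KEY_REPLICATION_THRIFT, true);
    // The sink is handed the local cluster UUID up front so it can answer getClusterUUID()
    // for remote ThriftHBaseReplicationEndpoint instances.
    ReplicationSink sink = new ReplicationSink(conf, stopper,
        ZKClusterId.getUUIDForCluster(zkWatcher).toString());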
@@ -255,4 +271,25 @@ public class ReplicationSink { "age in ms of last applied edit: " + this.metrics.refreshAgeOfLastAppliedOp() + ", total replicated edits: " + this.totalReplicatedEdits; } + + @Override public void replicate(TBatchEdit edits) throws TIOError, TException { + try { + List<HLog.Entry> logEntries = ThriftAdaptors.REPLICATION_BATCH_ADAPTOR.fromThrift(edits); + Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p = + ReplicationProtbufUtil.buildReplicateWALEntryRequest( + logEntries.toArray(new HLog.Entry[logEntries.size()]) + ); + replicateEntries(p.getFirst().getEntryList(), p.getSecond()); + } catch (IOException e) { + throw new TException("Failed to replicate", e); + } + } + + @Override public void ping() throws TException { + LOG.trace("PING CALLED"); + } + + @Override public String getClusterUUID() throws TException { + return clusterId; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java index 28fb50f..1e8cdfe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java @@ -166,7 +166,7 @@ public class ReplicationSinkManager { this.regionServer = regionServer; } - ServerName getServerName() { + public ServerName getServerName() { return serverName; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ChainedTTransportFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ChainedTTransportFactory.java new file mode 100644 index 0000000..8748f16 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ChainedTTransportFactory.java @@ -0,0 +1,40 @@ +/* + * Copyright 2010 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication.thrift; + +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportFactory; + +final class ChainedTTransportFactory extends TTransportFactory { + private final TTransportFactory parentTransFactory; + private final TTransportFactory childTransFactory; + + ChainedTTransportFactory( + TTransportFactory parentTransFactory, + TTransportFactory childTransFactory) { + this.parentTransFactory = parentTransFactory; + this.childTransFactory = childTransFactory; + } + + @Override + public TTransport getTransport(TTransport trans) { + return childTransFactory.getTransport(parentTransFactory.getTransport(trans)); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/TFilterTransport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/TFilterTransport.java new file mode 100644 index 0000000..cc79b62 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/TFilterTransport.java @@ -0,0 +1,99 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.replication.thrift; + +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportException; + +/** + * Transport that simply wraps another transport. + * This is the equivalent of FilterInputStream for Thrift transports. 
+ */ +public class TFilterTransport extends TTransport { + protected final TTransport wrapped; + + public TFilterTransport(TTransport wrapped) { + this.wrapped = wrapped; + } + + @Override + public void open() throws TTransportException { + wrapped.open(); + } + + @Override + public boolean isOpen() { + return wrapped.isOpen(); + } + + @Override + public boolean peek() { + return wrapped.peek(); + } + + @Override + public void close() { + wrapped.close(); + } + + @Override + public int read(byte[] buf, int off, int len) throws TTransportException { + return wrapped.read(buf, off, len); + } + + @Override + public int readAll(byte[] buf, int off, int len) throws TTransportException { + return wrapped.readAll(buf, off, len); + } + + @Override + public void write(byte[] buf) throws TTransportException { + wrapped.write(buf); + } + + @Override + public void write(byte[] buf, int off, int len) throws TTransportException { + wrapped.write(buf, off, len); + } + + @Override + public void flush() throws TTransportException { + wrapped.flush(); + } + + @Override + public byte[] getBuffer() { + return wrapped.getBuffer(); + } + + @Override + public int getBufferPosition() { + return wrapped.getBufferPosition(); + } + + @Override + public int getBytesRemainingInBuffer() { + return wrapped.getBytesRemainingInBuffer(); + } + + @Override + public void consumeBuffer(int len) { + wrapped.consumeBuffer(len); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/TUGIAssumingTransport.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/TUGIAssumingTransport.java new file mode 100644 index 0000000..649e132 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/TUGIAssumingTransport.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication.thrift; + +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportException; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; + +public class TUGIAssumingTransport extends TFilterTransport { + protected UserGroupInformation ugi; + + public TUGIAssumingTransport(TTransport wrapped, UserGroupInformation ugi) { + super(wrapped); + this.ugi = ugi; + } + + @Override + public void open() throws TTransportException { + try { + ugi.doAs(new PrivilegedExceptionAction<Void>() { + public Void run() { + try { + wrapped.open(); + } catch (TTransportException tte) { + // Wrap the transport exception in an RTE, since UGI.doAs() then goes + // and unwraps this for us out of the doAs block. We then unwrap one + // more time in our catch clause to get back the TTE. 
(ugh) + throw new RuntimeException(tte); + } + return null; + } + }); + } catch (IOException ioe) { + throw new RuntimeException("Received an ioe we never threw!", ioe); + } catch (InterruptedException ie) { + throw new RuntimeException("Received an ie we never threw!", ie); + } catch (RuntimeException rte) { + if (rte.getCause() instanceof TTransportException) { + throw (TTransportException)rte.getCause(); + } else { + throw rte; + } + } + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/TUGIAssumingTransportFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/TUGIAssumingTransportFactory.java new file mode 100644 index 0000000..daa32be --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/TUGIAssumingTransportFactory.java @@ -0,0 +1,55 @@ +/* + * Copyright 2010 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication.thrift; + +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportFactory; + +import java.security.PrivilegedAction; + +/** + * A TransportFactory that wraps another one, but assumes a specified UGI + * before calling through. + * + * This is used on the server side to assume the server's Principal when accepting + * clients. 
+ */ +public class TUGIAssumingTransportFactory extends TTransportFactory { + private final UserGroupInformation ugi; + private final TTransportFactory wrapped; + + public TUGIAssumingTransportFactory(TTransportFactory wrapped, UserGroupInformation ugi) { + assert wrapped != null; + assert ugi != null; + + this.wrapped = wrapped; + this.ugi = ugi; + } + + @Override + public TTransport getTransport(final TTransport trans) { + return ugi.doAs(new PrivilegedAction<TTransport>() { + public TTransport run() { + return wrapped.getTransport(trans); + } + }); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftAdaptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftAdaptors.java new file mode 100644 index 0000000..42346ab --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftAdaptors.java @@ -0,0 +1,158 @@ +package org.apache.hadoop.hbase.replication.thrift; + +import com.google.common.base.Function; +import com.google.common.collect.Lists; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.regionserver.wal.HLog; +import org.apache.hadoop.hbase.regionserver.wal.HLogKey; +import org.apache.hadoop.hbase.regionserver.wal.WALEdit; +import org.apache.hadoop.hbase.replication.thrift.generated.*; +import java.util.List; +import java.util.UUID; + +public class ThriftAdaptors { + + interface Adaptor<K, V> { + + K fromThrift(V vee); + + V toThrift(K kay); + + } + + public static final ReplicationBatchAdaptor REPLICATION_BATCH_ADAPTOR = new ReplicationBatchAdaptor(); + static final ClusterIdAdaptor CLUSTER_ID_ADAPTOR = new ClusterIdAdaptor(); + static final ColumnValueAdaptor COLUMN_VALUE_ADAPTOR = new ColumnValueAdaptor(); + static final WALEntryAdaptor WAL_ENTRY_ADAPTOR = new WALEntryAdaptor(); + + public static class ReplicationBatchAdaptor implements Adaptor<List<HLog.Entry>, TBatchEdit> { + + @Override public List<HLog.Entry> fromThrift(TBatchEdit thriftBatch) { + return Lists.transform(thriftBatch.getEdits(), new Function<TEdit, HLog.Entry>() { + @Override public HLog.Entry apply(TEdit input) { + return new HLog.Entry( + new HLogKeyAdaptor( + CLUSTER_ID_ADAPTOR.fromThrift(input.getClusterIds())) + .fromThrift(input.getHLogKey()), + WAL_ENTRY_ADAPTOR.fromThrift(input.getWalEdit()) + ); + } + }); + } + + @Override public TBatchEdit toThrift(List<HLog.Entry> hLogEntries) { + TBatchEdit batch = new TBatchEdit(); + for (HLog.Entry hLogEntry : hLogEntries) { + List<UUID> clusterIds = hLogEntry.getKey().getClusterIds(); + HLogKeyAdaptor adaptor = new HLogKeyAdaptor(clusterIds); + batch.addToEdits( + new TEdit( + adaptor.toThrift(hLogEntry.getKey()), + WAL_ENTRY_ADAPTOR.toThrift(hLogEntry.getEdit()), + CLUSTER_ID_ADAPTOR.toThrift(clusterIds) + )); + } + return batch; + } + } + + // Maps HLogKey to Thrift: THLogKey + static class HLogKeyAdaptor implements Adaptor<HLogKey, THLogKey> { + + private final List<UUID> clusterIds; + + HLogKeyAdaptor(List<UUID> clusterIds) { + this.clusterIds = clusterIds; + } + + @Override public HLogKey fromThrift(THLogKey key) { + return new HLogKey( + null, + TableName.valueOf(key.getTableName()), + key.getSeqNum(), + key.getWriteTime(), // use the shipped write time; local clock time would zero out age-of-last-applied metrics + clusterIds, + HConstants.NO_NONCE, + HConstants.NO_NONCE + ); + } + + @Override public THLogKey toThrift(HLogKey hLogKey) { + THLogKey key = new THLogKey(); + key.setTableName(hLogKey.getTablename().getName()); + key.setSeqNum(hLogKey.getLogSeqNum()); // ship the sequence number so fromThrift does not rebuild keys with seqNum 0 + key.setWriteTime(hLogKey.getWriteTime()); + return key; + } + } + + // Maps WALEdit to Thrift: TWalLEdit + static class WALEntryAdaptor implements Adaptor<WALEdit, TWalLEdit> { + + @Override public WALEdit fromThrift(TWalLEdit thriftEdit) { + WALEdit walEdit = new WALEdit(); + for (TColumnValue mutation : thriftEdit.getMutations()) { + walEdit.add(COLUMN_VALUE_ADAPTOR.fromThrift(mutation)); + } + return walEdit; + } + + @Override public TWalLEdit toThrift(WALEdit walEdit) { + TWalLEdit thriftEdit = new TWalLEdit(); + thriftEdit.setMutations( + Lists.transform(walEdit.getKeyValues(), new Function<KeyValue, TColumnValue>() { + @Override public TColumnValue apply(KeyValue keyValue) { + return COLUMN_VALUE_ADAPTOR.toThrift(keyValue); + } + }) + ); + return thriftEdit; + } + } + + // Maps Cluster's UUID to Thrift: TClusterId + static class ClusterIdAdaptor implements Adaptor<List<UUID>, List<TClusterId>> { + + @Override public List<UUID> fromThrift(List<TClusterId> clusterIds) { + return Lists.transform(clusterIds, new Function<TClusterId, UUID>() { + @Override public UUID apply(TClusterId clusterId) { + return new UUID(clusterId.getUb(), clusterId.getLb()); + } + }); + } + + @Override public List<TClusterId> toThrift(List<UUID> uuids) { + return Lists.transform(uuids, new Function<UUID, TClusterId>() { + @Override public TClusterId apply(UUID uuid) { + return new TClusterId(uuid.getLeastSignificantBits(), uuid.getMostSignificantBits()); + } + }); + } + } + + // Maps KeyValue to Thrift: TColumnValue + static class ColumnValueAdaptor implements Adaptor<Cell, TColumnValue> { + + @Override public Cell fromThrift(TColumnValue tColumnValue) { + return new KeyValue( + tColumnValue.getRow(), + tColumnValue.getFamily(), + tColumnValue.getQualifier(), + tColumnValue.getTimestamp(), + ThriftEditType.codeToType(tColumnValue.getType()).getKvType(), + tColumnValue.getValue() + ); + } + + @Override public TColumnValue toThrift(Cell kv) { + TColumnValue col = new TColumnValue(); + col.setRow(kv.getRow()); + col.setFamily(kv.getFamily()); + col.setQualifier(kv.getQualifier()); + col.setValue(kv.getValue()); + col.setTimestamp(kv.getTimestamp()); + col.setType( + ThriftEditType.keyValueToType(KeyValue.Type.codeToType(kv.getTypeByte())).getCode()); + return col; + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftClient.java new file mode 100644 index 0000000..6277456 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftClient.java @@ -0,0 +1,187 @@ +/** + * Copyright 2011 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication.thrift; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.regionserver.wal.HLog; +import org.apache.hadoop.hbase.replication.thrift.generated.THBaseService; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.security.SaslRpcServer; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.thrift.TException; +import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.protocol.TCompactProtocol; +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.transport.TSaslClientTransport; +import org.apache.thrift.transport.TSocket; +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportException; + +import javax.security.sasl.Sasl; +import java.io.IOException; +import java.util.*; +import java.util.concurrent.ConcurrentSkipListMap; + +public class ThriftClient { + private static final Log LOG = LogFactory.getLog(ThriftClient.class); + private Configuration conf; + private final String peerId; + private boolean isSecure; + private ConcurrentSkipListMap<String, THBaseService.Client> clients = + new ConcurrentSkipListMap<String, THBaseService.Client>(); + + + public ThriftClient(Configuration conf, String peerId) throws IOException { + this.conf = conf; + this.peerId = peerId; + this.isSecure = User.isHBaseSecurityEnabled(conf); + } + + private THBaseService.Client createClient(String host, int port) throws IOException, + TTransportException { + boolean isCompact = + conf.getBoolean("hbase.replication.thrift.compact", true); + + String serverProtocol = UserGroupInformation.getCurrentUser().getUserName(); + String serverAddress = null; + if(User.isHBaseSecurityEnabled(conf)) { + String kerberosName = UserGroupInformation.getCurrentUser().getUserName(); + final String names[] = SaslRpcServer.splitKerberosName(kerberosName); + if (names.length != 3) { + throw new TTransportException("Kerberos principal should have 3 parts: " + kerberosName); + } + serverProtocol = names[0]; + serverAddress = names[1]; + } + + TTransport transport = new TSocket(host, port); + if(isSecure) { + Map<String, String> saslProps = new HashMap<String, String>(); + saslProps.put(Sasl.QOP, ThriftUtilities.getQOP(conf).getSaslQop()); + transport = new TUGIAssumingTransport( + new TSaslClientTransport( + ThriftUtilities.getAuthType(conf).getMechanismName(), + null, + serverProtocol, serverAddress, + saslProps, null, + transport), + User.getCurrent().getUGI()); + } + try { + transport.open(); + LOG.debug("Connected to "+host+":"+port); + } catch (TTransportException e) { + throw new IOException("Failed to open transport connection to : "+host+":"+port, e); + } + + TProtocol protocol; + if(isCompact) { + protocol = new TCompactProtocol(transport); + } else { + protocol = new TBinaryProtocol(transport); + } + + return new THBaseService.Client(protocol); + } + + public THBaseService.Client getClient(String host, int port) throws IOException, TTransportException { + String key = host+":"+port; + if(clients.containsKey(key)) { + return clients.get(key); + } + THBaseService.Client client = createClient(host, port); + clients.put(key, client); + return clients.get(key); + } + + public void removeClient(String host, int port) { + clients.remove(host+":"+port); + } + + public void ping(ServerName serverName) throws IOException { + THBaseService.Client client = null; + try { + client = getClientFromServerName(serverName); + client.ping(); + } catch (TException e) { + removeClient(serverName.getHostname(), ThriftUtilities.getDestinationPeerPort(conf, peerId)); + try { + client.getOutputProtocol().getTransport().close(); + } catch(Exception e2) { + LOG.debug("Failed to gracefully close broken transport.", e2); + } + throw new IOException("Failed to ping replication client", e); + } + + } + + public void shipEdits(ServerName serverName, List<HLog.Entry> entries) throws IOException { + THBaseService.Client client; + String host = serverName.getHostname(); + int port = ThriftUtilities.getDestinationPeerPort(conf, peerId); + try { + client = getClient(host, port); + } catch (TTransportException e) { + throw new IOException("Failed to create replication client", e); + } + try { + client.replicate(ThriftAdaptors.REPLICATION_BATCH_ADAPTOR.toThrift(entries)); + } catch (TTransportException e) { + removeClient(host, port); + try { + client.getOutputProtocol().getTransport().close(); + } catch(Exception e2) { + LOG.debug("Failed to gracefully close broken transport.", e2); + } + throw new IOException("Failed to ship edits", e); + } catch (TException e) { + throw new IOException("Failed to ship edits", e); + } + } + + public UUID getPeerClusterUUID(ServerName serverName) { + THBaseService.Client client; + String host = serverName.getHostname(); + int port = ThriftUtilities.getDestinationPeerPort(conf, peerId); + try { + client = getClient(host, port); + return UUID.fromString(client.getClusterUUID()); + } catch (Exception e) { + LOG.error("Error getting UUID from remote cluster", e); + return null; + } + } + + + private THBaseService.Client getClientFromServerName(ServerName serverName) throws IOException { + THBaseService.Client client; + String host = serverName.getHostname(); + int port = ThriftUtilities.getDestinationPeerPort(conf, peerId); + try { + client = getClient(host, port); + } catch (TTransportException e) { + throw new IOException("Failed to create replication client", e); + } + return client; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftEditType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftEditType.java new file mode 100644 index 0000000..99d3d67 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftEditType.java @@ -0,0 +1,75 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hbase.replication.thrift;
+
+import com.google.common.base.Function;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.hbase.KeyValue;
+
+import java.util.Arrays;
+import java.util.Map;
+
+public enum ThriftEditType {
+  PUT((byte)1, KeyValue.Type.Put),
+  DELCF((byte)2, KeyValue.Type.DeleteFamily),
+  DELCOLS((byte)3, KeyValue.Type.DeleteColumn),
+  DEL((byte)4, KeyValue.Type.Delete);
+
+  // Index of wire code -> edit type.
+  private static Map<Byte, ThriftEditType> toEditType =
+      Maps.uniqueIndex(Arrays.asList(ThriftEditType.values()),
+          new Function<ThriftEditType, Byte>() {
+            @Override public Byte apply(ThriftEditType editType) {
+              return editType.getCode();
+            }
+          });
+
+  // Index of KeyValue type -> edit type.
+  private static Map<KeyValue.Type, ThriftEditType> keyValueToThrift =
+      Maps.uniqueIndex(Arrays.asList(ThriftEditType.values()),
+          new Function<ThriftEditType, KeyValue.Type>() {
+            @Override public KeyValue.Type apply(ThriftEditType editType) {
+              return editType.getKvType();
+            }
+          });
+
+  private byte val;
+  private KeyValue.Type kvType;
+
+  ThriftEditType(byte i, KeyValue.Type kvType) {
+    val = i;
+    this.kvType = kvType;
+  }
+
+  public byte getCode() {
+    return val;
+  }
+
+  public KeyValue.Type getKvType() {
+    return kvType;
+  }
+
+  public static ThriftEditType codeToType(byte someByte) {
+    return toEditType.get(someByte);
+  }
+
+  public static ThriftEditType keyValueToType(KeyValue.Type keyValueType) {
+    return keyValueToThrift.get(keyValueType);
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftHBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftHBaseReplicationEndpoint.java
new file mode 100644
index 0000000..9cd3aa5
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftHBaseReplicationEndpoint.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.replication.thrift;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint;
+import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * A {@link org.apache.hadoop.hbase.replication.BaseReplicationEndpoint} for replication
+ * endpoints whose target cluster is an HBase cluster reached over Thrift.
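+ * Edits are converted by {@link ThriftAdaptors} and shipped to the peer's
+ * ThriftServer through the THBaseService.replicate() call.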
+ */
+@InterfaceAudience.Private
+public class ThriftHBaseReplicationEndpoint extends HBaseInterClusterReplicationEndpoint {
+
+  private static final Log LOG = LogFactory.getLog(ThriftHBaseReplicationEndpoint.class);
+  private ThriftClient client;
+
+  @Override public void init(Context context) throws IOException {
+    super.init(context);
+    client = new ThriftClient(ctx.getConfiguration(), ctx.getPeerId());
+  }
+
+  @Override protected void replicateWALEntry(List<HLog.Entry> entries,
+      ReplicationSinkManager.SinkPeer sinkPeer) throws IOException {
+    client.shipEdits(sinkPeer.getServerName(), entries);
+  }
+
+  @Override public synchronized UUID getPeerUUID() {
+    UUID result = null;
+    try {
+      result = client.getPeerClusterUUID(replicationSinkMgr.getReplicationSink().getServerName());
+    } catch (IOException e) {
+      LOG.warn("Error connecting to peer to get UUID", e);
+    }
+    return result;
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftHBaseServiceHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftHBaseServiceHandler.java
new file mode 100644
index 0000000..8d37018
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftHBaseServiceHandler.java
@@ -0,0 +1,102 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.thrift;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.replication.thrift.generated.TBatchEdit;
+import org.apache.hadoop.hbase.replication.thrift.generated.THBaseService;
+import org.apache.hadoop.hbase.replication.thrift.generated.TIOError;
+import org.apache.thrift.TException;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+
+public class ThriftHBaseServiceHandler implements THBaseService.Iface {
+
+  private final THBaseService.Iface serviceHandler;
+  private static final Log LOG = LogFactory.getLog(ThriftHBaseServiceHandler.class);
+
+  public static THBaseService.Iface newInstance(Configuration conf,
+      THBaseService.Iface serviceHandler, ThriftMetrics metrics) {
+    THBaseService.Iface handler = new ThriftHBaseServiceHandler(conf, serviceHandler);
+    return (THBaseService.Iface) Proxy.newProxyInstance(handler.getClass().getClassLoader(),
+        new Class[] { THBaseService.Iface.class }, new THBaseServiceMetricsProxy(handler, metrics));
+  }
+
+  // Dynamic proxy that times each service method and reports it to ThriftMetrics.
+  private static class THBaseServiceMetricsProxy implements InvocationHandler {
+    private final THBaseService.Iface handler;
+    private final ThriftMetrics metrics;
+
+    private THBaseServiceMetricsProxy(THBaseService.Iface handler, ThriftMetrics metrics) {
+      this.handler = handler;
+      this.metrics = metrics;
+    }
+
+    @Override
+    public Object invoke(Object proxy, Method m, Object[] args) throws Throwable {
+      Object result;
+      try {
+        long start = now();
+        result = m.invoke(handler, args);
+        int processTime = (int) (now() - start);
+        metrics.incMethodTime(m.getName(), processTime);
+      } catch (InvocationTargetException e) {
+        throw e.getTargetException();
+      } catch (Exception e) {
+        throw new RuntimeException("unexpected invocation exception: " + e.getMessage(), e);
+      }
+      return result;
+    }
+  }
+
+  private static long now() {
+    return System.nanoTime();
+  }
+
+  ThriftHBaseServiceHandler(Configuration conf, THBaseService.Iface serviceHandler) {
+    this.serviceHandler = serviceHandler;
+  }
+
+  private TIOError getTIOError(IOException e) {
+    TIOError err = new TIOError();
+    err.setMessage(e.getMessage());
+    return err;
+  }
+
+  @Override
+  public void replicate(TBatchEdit edits) throws TException {
+    serviceHandler.replicate(edits);
+  }
+
+  @Override public void ping() throws TException {
+    serviceHandler.ping();
+  }
+
+  @Override public String getClusterUUID() throws TException {
+    return serviceHandler.getClusterUUID();
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftMetrics.java
new file mode 100644
index 0000000..7d189e4
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftMetrics.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.hbase.replication.thrift;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.thrift.MetricsThriftServerSource;
+import org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactory;
+
+/**
+ * This class is for maintaining the various statistics of the Thrift server
+ * and publishing them through the metrics interfaces.
+ */
+@InterfaceAudience.Private
+public class ThriftMetrics {
+
+  private MetricsThriftServerSource source;
+  private final long slowResponseTime;
+  public static final String SLOW_RESPONSE_NANO_SEC = "hbase.thrift.slow.response.nano.second";
+  public static final long DEFAULT_SLOW_RESPONSE_NANO_SEC = 10 * 1000 * 1000;
+
+  public MetricsThriftServerSource getSource() {
+    return source;
+  }
+
+  public void setSource(MetricsThriftServerSource source) {
+    this.source = source;
+  }
+
+  public ThriftMetrics(Configuration conf) {
+    slowResponseTime = conf.getLong(SLOW_RESPONSE_NANO_SEC, DEFAULT_SLOW_RESPONSE_NANO_SEC);
+    source = CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class)
+        .createThriftOneSource();
+  }
+
+  public void incTimeInQueue(long time) {
+    source.incTimeInQueue(time);
+  }
+
+  public void setCallQueueLen(int len) {
+    source.setCallQueueLen(len);
+  }
+
+  public void incNumRowKeysInBatchGet(int diff) {
+    source.incNumRowKeysInBatchGet(diff);
+  }
+
+  public void incNumRowKeysInBatchMutate(int diff) {
+    source.incNumRowKeysInBatchMutate(diff);
+  }
+
+  public void incMethodTime(String name, long time) {
+    source.incMethodTime(name, time);
+    // inc general processTime
+    source.incCall(time);
+    if (time > slowResponseTime) {
+      source.incSlowCall(time);
+    }
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftServer.java
new file mode 100644
index 0000000..d2f0b8d
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftServer.java
@@ -0,0 +1,173 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.thrift;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.replication.thrift.generated.THBaseService;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TProtocolFactory;
+import org.apache.thrift.server.TServer;
+import org.apache.thrift.server.TThreadPoolServer;
+import org.apache.thrift.transport.*;
+
+import javax.security.sasl.Sasl;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class ThriftServer extends Thread {
+  private static final Log LOG = LogFactory.getLog(ThriftServer.class);
+  private TServer server;
+  private Configuration conf;
+  private THBaseService.Iface sinkInterface;
+  private boolean useSecure;
+
+  public ThriftServer(Configuration conf, THBaseService.Iface sinkInterface) throws IOException {
+    this.conf = conf;
+    this.sinkInterface = sinkInterface;
+    this.useSecure = User.isHBaseSecurityEnabled(conf);
+    try {
+      init();
+    } catch (TTransportException e) {
+      throw new IOException(e);
+    }
+  }
+
+  private static TProtocolFactory getTProtocolFactory(boolean isCompact) {
+    if (isCompact) {
+      LOG.debug("Using compact protocol");
+      return new TCompactProtocol.Factory();
+    } else {
+      LOG.debug("Using binary protocol");
+      return new TBinaryProtocol.Factory();
+    }
+  }
+
+  private static TTransportFactory getTTransportFactory(boolean framed) {
+    if (framed) {
+      LOG.debug("Using framed transport");
+      return new TFramedTransport.Factory();
+    } else {
+      return new TTransportFactory();
+    }
+  }
+
+  /*
+   * If bindValue is null, we don't bind.
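+   * In that case the server socket listens on the wildcard address;
+   * otherwise it is bound to the supplied hostname or IP on listenPort.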
+   */
+  private static InetSocketAddress bindToPort(String bindValue, int listenPort)
+      throws UnknownHostException {
+    try {
+      if (bindValue == null) {
+        return new InetSocketAddress(listenPort);
+      } else {
+        return new InetSocketAddress(InetAddress.getByName(bindValue), listenPort);
+      }
+    } catch (UnknownHostException e) {
+      throw new RuntimeException("Could not bind to provided ip address", e);
+    }
+  }
+
+  private static TServer getTThreadPoolServer(TProtocolFactory protocolFactory,
+      THBaseService.Processor processor, TTransportFactory transportFactory,
+      InetSocketAddress inetSocketAddress) throws TTransportException {
+    TServerTransport serverTransport = new TServerSocket(inetSocketAddress);
+    LOG.info("starting HBase ThreadPool Thrift server on " + inetSocketAddress.toString());
+    TThreadPoolServer.Args serverArgs = new TThreadPoolServer.Args(serverTransport);
+    serverArgs.maxWorkerThreads(100);
+    serverArgs.processor(processor);
+    serverArgs.transportFactory(transportFactory);
+    serverArgs.protocolFactory(protocolFactory);
+    return new TThreadPoolServer(serverArgs);
+  }
+
+  public void init() throws TTransportException, IOException {
+    int listenPort = ThriftUtilities.getThriftServerPort(conf);
+
+    ThriftMetrics metrics = new ThriftMetrics(conf);
+
+    boolean isCompact =
+        conf.getBoolean("hbase.replication.thrift.compact", true);
+
+    // Construct correct ProtocolFactory
+    TProtocolFactory protocolFactory = getTProtocolFactory(isCompact);
+    THBaseService.Iface handler =
+        ThriftHBaseServiceHandler.newInstance(conf, sinkInterface, metrics);
+    THBaseService.Processor processor = new THBaseService.Processor(handler);
+
+    boolean isFramed =
+        conf.getBoolean("hbase.replication.thrift.framed", false);
+    TTransportFactory transportFactory = getTTransportFactory(isFramed);
+
+    String serverProtocol = UserGroupInformation.getCurrentUser().getUserName();
+    String serverAddress = null;
+    if (User.isHBaseSecurityEnabled(conf)) {
+      String kerberosName = UserGroupInformation.getCurrentUser().getUserName();
+      final String names[] = SaslRpcServer.splitKerberosName(kerberosName);
+      if (names.length != 3) {
+        throw new TTransportException("Kerberos principal should have 3 parts: " + kerberosName);
+      }
+      serverProtocol = names[0];
+      serverAddress = names[1];
+    }
+
+    if (useSecure) {
+      Map<String, String> saslProps = new HashMap<String, String>();
+      saslProps.put(Sasl.QOP, ThriftUtilities.getQOP(conf).getSaslQop());
+      TSaslServerTransport.Factory saslTransFactory = new TSaslServerTransport.Factory();
+      saslTransFactory.addServerDefinition(
+          ThriftUtilities.getAuthType(conf).getMechanismName(),
+          serverProtocol, serverAddress, // two parts of kerberos principal
+          saslProps,
+          new SaslRpcServer.SaslGssCallbackHandler());
+
+      transportFactory = new ChainedTTransportFactory(transportFactory,
+          new TUGIAssumingTransportFactory(saslTransFactory,
+              UserGroupInformation.getCurrentUser()));
+    }
+
+    InetSocketAddress inetSocketAddress =
+        bindToPort(conf.get("hbase.replication.thrift.address"), listenPort);
+    LOG.info("Listening on " + inetSocketAddress.getHostName() + ":" + inetSocketAddress.getPort());
+    server = getTThreadPoolServer(protocolFactory, processor, transportFactory, inetSocketAddress);
+  }
+
+  @Override
+  public void run() {
+    server.serve();
+  }
+
+  public void shutdown() {
+    try {
+      server.stop();
+    } catch (Throwable e) {
+      LOG.error("Error stopping thrift server", e);
+    }
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftUtilities.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftUtilities.java
new file mode 100644
index 0000000..ecf4fee
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/ThriftUtilities.java
@@ -0,0 +1,71 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.thrift;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
+
+public class ThriftUtilities {
+
+  private ThriftUtilities() {
+    throw new UnsupportedOperationException("Can't initialize class");
+  }
+
+  public static SaslRpcServer.QualityOfProtection getQOP(Configuration conf) {
+    QualityOfProtection saslQOP = QualityOfProtection.AUTHENTICATION;
+    String rpcProtection = conf.get("hbase.replication.thrift.protection",
+        QualityOfProtection.AUTHENTICATION.name().toLowerCase());
+    if (QualityOfProtection.INTEGRITY.name().toLowerCase().equals(rpcProtection)) {
+      saslQOP = QualityOfProtection.INTEGRITY;
+    } else if (QualityOfProtection.PRIVACY.name().toLowerCase().equals(rpcProtection)) {
+      saslQOP = QualityOfProtection.PRIVACY;
+    }
+    return saslQOP;
+  }
+
+  public static AuthMethod getAuthType(Configuration conf) {
+    return AuthMethod.valueOf(conf.get(User.HBASE_SECURITY_CONF_KEY, "SIMPLE"));
+  }
+
+  public static int getThriftServerPort(Configuration conf) {
+    return conf.getInt("hbase.replication.thrift.server.port", -1);
+  }
+
+  /*
+   * hbase.replication.thrift.peer.<peerId>.port is used for testing purposes only.
+   * In a production cluster, where the peers run on separate hosts, you do not need
+   * to set this property; simply provide the hbase.replication.thrift.server.port
+   * value and keep it the same across clusters.
+   */
+  public static int getDestinationPeerPort(Configuration conf, String peerId) {
+    int port = conf.getInt("hbase.replication.thrift.peer." + peerId + ".port", -1);
+    // If there is no custom peer port set, use the default server port.
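+    // (getThriftServerPort likewise returns -1 when no server port is configured at all.)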
+ if (port == -1) { + port = getThriftServerPort(conf); + } + return port; + } + +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TBatchEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TBatchEdit.java new file mode 100644 index 0000000..815fd18 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TBatchEdit.java @@ -0,0 +1,435 @@ +/** + * Autogenerated by Thrift Compiler (0.9.1) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hbase.replication.thrift.generated; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TBatchEdit implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TBatchEdit"); + + private static final org.apache.thrift.protocol.TField EDITS_FIELD_DESC = new org.apache.thrift.protocol.TField("edits", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TBatchEditStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TBatchEditTupleSchemeFactory()); + } + + public List edits; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + EDITS((short)1, "edits"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // EDITS + return EDITS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.EDITS, new org.apache.thrift.meta_data.FieldMetaData("edits", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TEdit.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TBatchEdit.class, metaDataMap); + } + + public TBatchEdit() { + } + + public TBatchEdit( + List edits) + { + this(); + this.edits = edits; + } + + /** + * Performs a deep copy on other. + */ + public TBatchEdit(TBatchEdit other) { + if (other.isSetEdits()) { + List __this__edits = new ArrayList(other.edits.size()); + for (TEdit other_element : other.edits) { + __this__edits.add(new TEdit(other_element)); + } + this.edits = __this__edits; + } + } + + public TBatchEdit deepCopy() { + return new TBatchEdit(this); + } + + @Override + public void clear() { + this.edits = null; + } + + public int getEditsSize() { + return (this.edits == null) ? 0 : this.edits.size(); + } + + public java.util.Iterator getEditsIterator() { + return (this.edits == null) ? 
null : this.edits.iterator(); + } + + public void addToEdits(TEdit elem) { + if (this.edits == null) { + this.edits = new ArrayList(); + } + this.edits.add(elem); + } + + public List getEdits() { + return this.edits; + } + + public TBatchEdit setEdits(List edits) { + this.edits = edits; + return this; + } + + public void unsetEdits() { + this.edits = null; + } + + /** Returns true if field edits is set (has been assigned a value) and false otherwise */ + public boolean isSetEdits() { + return this.edits != null; + } + + public void setEditsIsSet(boolean value) { + if (!value) { + this.edits = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case EDITS: + if (value == null) { + unsetEdits(); + } else { + setEdits((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case EDITS: + return getEdits(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case EDITS: + return isSetEdits(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TBatchEdit) + return this.equals((TBatchEdit)that); + return false; + } + + public boolean equals(TBatchEdit that) { + if (that == null) + return false; + + boolean this_present_edits = true && this.isSetEdits(); + boolean that_present_edits = true && that.isSetEdits(); + if (this_present_edits || that_present_edits) { + if (!(this_present_edits && that_present_edits)) + return false; + if (!this.edits.equals(that.edits)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public int compareTo(TBatchEdit other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetEdits()).compareTo(other.isSetEdits()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEdits()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.edits, other.edits); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TBatchEdit("); + boolean first = true; + + sb.append("edits:"); + if (this.edits == null) { + sb.append("null"); + } else { + sb.append(this.edits); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (edits == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'edits' was not present! 
Struct: " + toString()); + } + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TBatchEditStandardSchemeFactory implements SchemeFactory { + public TBatchEditStandardScheme getScheme() { + return new TBatchEditStandardScheme(); + } + } + + private static class TBatchEditStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TBatchEdit struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // EDITS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list16 = iprot.readListBegin(); + struct.edits = new ArrayList(_list16.size); + for (int _i17 = 0; _i17 < _list16.size; ++_i17) + { + TEdit _elem18; + _elem18 = new TEdit(); + _elem18.read(iprot); + struct.edits.add(_elem18); + } + iprot.readListEnd(); + } + struct.setEditsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TBatchEdit struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.edits != null) { + oprot.writeFieldBegin(EDITS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.edits.size())); + for (TEdit _iter19 : struct.edits) + { + _iter19.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TBatchEditTupleSchemeFactory implements SchemeFactory { + public TBatchEditTupleScheme getScheme() { + return new TBatchEditTupleScheme(); + } + } + + private static class TBatchEditTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TBatchEdit struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.edits.size()); + for (TEdit _iter20 : struct.edits) + { + _iter20.write(oprot); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TBatchEdit struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list21 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.edits = 
new ArrayList(_list21.size); + for (int _i22 = 0; _i22 < _list21.size; ++_i22) + { + TEdit _elem23; + _elem23 = new TEdit(); + _elem23.read(iprot); + struct.edits.add(_elem23); + } + } + struct.setEditsIsSet(true); + } + } + +} + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TClusterId.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TClusterId.java new file mode 100644 index 0000000..61cf48f --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TClusterId.java @@ -0,0 +1,480 @@ +/** + * Autogenerated by Thrift Compiler (0.9.1) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hbase.replication.thrift.generated; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TClusterId implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TClusterId"); + + private static final org.apache.thrift.protocol.TField LB_FIELD_DESC = new org.apache.thrift.protocol.TField("lb", org.apache.thrift.protocol.TType.I64, (short)1); + private static final org.apache.thrift.protocol.TField UB_FIELD_DESC = new org.apache.thrift.protocol.TField("ub", org.apache.thrift.protocol.TType.I64, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TClusterIdStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TClusterIdTupleSchemeFactory()); + } + + public long lb; // required + public long ub; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + LB((short)1, "lb"), + UB((short)2, "ub"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // LB + return LB; + case 2: // UB + return UB; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __LB_ISSET_ID = 0; + private static final int __UB_ISSET_ID = 1; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.LB, new org.apache.thrift.meta_data.FieldMetaData("lb", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.UB, new org.apache.thrift.meta_data.FieldMetaData("ub", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TClusterId.class, metaDataMap); + } + + public TClusterId() { + } + + public TClusterId( + long lb, + long ub) + { + this(); + this.lb = lb; + setLbIsSet(true); + this.ub = ub; + setUbIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public TClusterId(TClusterId other) { + __isset_bitfield = other.__isset_bitfield; + this.lb = other.lb; + this.ub = other.ub; + } + + public TClusterId deepCopy() { + return new TClusterId(this); + } + + @Override + public void clear() { + setLbIsSet(false); + this.lb = 0; + setUbIsSet(false); + this.ub = 0; + } + + public long getLb() { + return this.lb; + } + + public TClusterId setLb(long lb) { + this.lb = lb; + setLbIsSet(true); + return this; + } + + public void unsetLb() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LB_ISSET_ID); + } + + /** Returns true if field lb is set (has been assigned a value) and false otherwise */ + public boolean isSetLb() { + return EncodingUtils.testBit(__isset_bitfield, __LB_ISSET_ID); + } + + public void setLbIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LB_ISSET_ID, value); + } + + public long getUb() { + return this.ub; + } + + public TClusterId setUb(long ub) { + this.ub = ub; + setUbIsSet(true); + return this; + } + + public void unsetUb() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UB_ISSET_ID); + } + + /** Returns true if field ub is set (has been assigned a value) and false otherwise */ + public boolean isSetUb() { + return EncodingUtils.testBit(__isset_bitfield, __UB_ISSET_ID); + } + + public void setUbIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UB_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case LB: + if (value == null) { + unsetLb(); + } else { + setLb((Long)value); + } + break; + + case UB: + if (value == null) { + unsetUb(); + } else { + setUb((Long)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case LB: + return Long.valueOf(getLb()); + + case UB: + return Long.valueOf(getUb()); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case LB: + return isSetLb(); + case UB: + return isSetUb(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TClusterId) + return this.equals((TClusterId)that); + return false; + } + + public boolean equals(TClusterId that) { + if (that == null) + return false; + + boolean this_present_lb = true; + boolean that_present_lb = true; + if (this_present_lb || that_present_lb) { + if (!(this_present_lb && that_present_lb)) + return false; + if (this.lb != that.lb) + return false; + } + + boolean this_present_ub = true; + boolean that_present_ub = true; + if (this_present_ub || that_present_ub) { + if (!(this_present_ub && that_present_ub)) + return false; + if (this.ub != that.ub) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public int compareTo(TClusterId other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetLb()).compareTo(other.isSetLb()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetLb()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lb, other.lb); + if (lastComparison != 0) { + return 
lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetUb()).compareTo(other.isSetUb()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetUb()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ub, other.ub); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TClusterId("); + boolean first = true; + + sb.append("lb:"); + sb.append(this.lb); + first = false; + if (!first) sb.append(", "); + sb.append("ub:"); + sb.append(this.ub); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TClusterIdStandardSchemeFactory implements SchemeFactory { + public TClusterIdStandardScheme getScheme() { + return new TClusterIdStandardScheme(); + } + } + + private static class TClusterIdStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TClusterId struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // LB + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.lb = iprot.readI64(); + struct.setLbIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // UB + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.ub = iprot.readI64(); + struct.setUbIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TClusterId struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + 
oprot.writeFieldBegin(LB_FIELD_DESC); + oprot.writeI64(struct.lb); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(UB_FIELD_DESC); + oprot.writeI64(struct.ub); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TClusterIdTupleSchemeFactory implements SchemeFactory { + public TClusterIdTupleScheme getScheme() { + return new TClusterIdTupleScheme(); + } + } + + private static class TClusterIdTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TClusterId struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetLb()) { + optionals.set(0); + } + if (struct.isSetUb()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetLb()) { + oprot.writeI64(struct.lb); + } + if (struct.isSetUb()) { + oprot.writeI64(struct.ub); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TClusterId struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.lb = iprot.readI64(); + struct.setLbIsSet(true); + } + if (incoming.get(1)) { + struct.ub = iprot.readI64(); + struct.setUbIsSet(true); + } + } + } + +} + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TColumnValue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TColumnValue.java new file mode 100644 index 0000000..0dfdd26 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TColumnValue.java @@ -0,0 +1,910 @@ +/** + * Autogenerated by Thrift Compiler (0.9.1) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hbase.replication.thrift.generated; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Represents a single cell and its value. 
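+ * The 'type' byte is expected to carry a ThriftEditType code
+ * (1 = Put, 2 = DeleteFamily, 3 = DeleteColumn, 4 = Delete).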
+ */ +public class TColumnValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnValue"); + + private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField FAMILY_FIELD_DESC = new org.apache.thrift.protocol.TField("family", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField QUALIFIER_FIELD_DESC = new org.apache.thrift.protocol.TField("qualifier", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField TIMESTAMP_FIELD_DESC = new org.apache.thrift.protocol.TField("timestamp", org.apache.thrift.protocol.TType.I64, (short)5); + private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.BYTE, (short)6); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TColumnValueStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TColumnValueTupleSchemeFactory()); + } + + public ByteBuffer row; // required + public ByteBuffer family; // required + public ByteBuffer qualifier; // required + public ByteBuffer value; // required + public long timestamp; // optional + public byte type; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + ROW((short)1, "row"), + FAMILY((short)2, "family"), + QUALIFIER((short)3, "qualifier"), + VALUE((short)4, "value"), + TIMESTAMP((short)5, "timestamp"), + TYPE((short)6, "type"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // ROW + return ROW; + case 2: // FAMILY + return FAMILY; + case 3: // QUALIFIER + return QUALIFIER; + case 4: // VALUE + return VALUE; + case 5: // TIMESTAMP + return TIMESTAMP; + case 6: // TYPE + return TYPE; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __TIMESTAMP_ISSET_ID = 0; + private static final int __TYPE_ISSET_ID = 1; + private byte __isset_bitfield = 0; + private _Fields optionals[] = {_Fields.TIMESTAMP}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + tmpMap.put(_Fields.FAMILY, new org.apache.thrift.meta_data.FieldMetaData("family", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + tmpMap.put(_Fields.QUALIFIER, new org.apache.thrift.meta_data.FieldMetaData("qualifier", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + tmpMap.put(_Fields.TIMESTAMP, new org.apache.thrift.meta_data.FieldMetaData("timestamp", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BYTE))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TColumnValue.class, metaDataMap); + } + + public TColumnValue() { + } + + public TColumnValue( + ByteBuffer row, + ByteBuffer family, + ByteBuffer qualifier, + ByteBuffer value, + byte type) + { + this(); + this.row = row; + this.family = family; + this.qualifier = qualifier; + this.value = value; + this.type = type; + setTypeIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public TColumnValue(TColumnValue other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetRow()) { + this.row = org.apache.thrift.TBaseHelper.copyBinary(other.row); +; + } + if (other.isSetFamily()) { + this.family = org.apache.thrift.TBaseHelper.copyBinary(other.family); +; + } + if (other.isSetQualifier()) { + this.qualifier = org.apache.thrift.TBaseHelper.copyBinary(other.qualifier); +; + } + if (other.isSetValue()) { + this.value = org.apache.thrift.TBaseHelper.copyBinary(other.value); +; + } + this.timestamp = other.timestamp; + this.type = other.type; + } + + public TColumnValue deepCopy() { + return new TColumnValue(this); + } + + @Override + public void clear() { + this.row = null; + this.family = null; + this.qualifier = null; + this.value = null; + setTimestampIsSet(false); + this.timestamp = 0; + setTypeIsSet(false); + this.type = 0; + } + + public byte[] getRow() { + setRow(org.apache.thrift.TBaseHelper.rightSize(row)); + return row == null ? null : row.array(); + } + + public ByteBuffer bufferForRow() { + return row; + } + + public TColumnValue setRow(byte[] row) { + setRow(row == null ? (ByteBuffer)null : ByteBuffer.wrap(row)); + return this; + } + + public TColumnValue setRow(ByteBuffer row) { + this.row = row; + return this; + } + + public void unsetRow() { + this.row = null; + } + + /** Returns true if field row is set (has been assigned a value) and false otherwise */ + public boolean isSetRow() { + return this.row != null; + } + + public void setRowIsSet(boolean value) { + if (!value) { + this.row = null; + } + } + + public byte[] getFamily() { + setFamily(org.apache.thrift.TBaseHelper.rightSize(family)); + return family == null ? null : family.array(); + } + + public ByteBuffer bufferForFamily() { + return family; + } + + public TColumnValue setFamily(byte[] family) { + setFamily(family == null ? (ByteBuffer)null : ByteBuffer.wrap(family)); + return this; + } + + public TColumnValue setFamily(ByteBuffer family) { + this.family = family; + return this; + } + + public void unsetFamily() { + this.family = null; + } + + /** Returns true if field family is set (has been assigned a value) and false otherwise */ + public boolean isSetFamily() { + return this.family != null; + } + + public void setFamilyIsSet(boolean value) { + if (!value) { + this.family = null; + } + } + + public byte[] getQualifier() { + setQualifier(org.apache.thrift.TBaseHelper.rightSize(qualifier)); + return qualifier == null ? null : qualifier.array(); + } + + public ByteBuffer bufferForQualifier() { + return qualifier; + } + + public TColumnValue setQualifier(byte[] qualifier) { + setQualifier(qualifier == null ? (ByteBuffer)null : ByteBuffer.wrap(qualifier)); + return this; + } + + public TColumnValue setQualifier(ByteBuffer qualifier) { + this.qualifier = qualifier; + return this; + } + + public void unsetQualifier() { + this.qualifier = null; + } + + /** Returns true if field qualifier is set (has been assigned a value) and false otherwise */ + public boolean isSetQualifier() { + return this.qualifier != null; + } + + public void setQualifierIsSet(boolean value) { + if (!value) { + this.qualifier = null; + } + } + + public byte[] getValue() { + setValue(org.apache.thrift.TBaseHelper.rightSize(value)); + return value == null ? null : value.array(); + } + + public ByteBuffer bufferForValue() { + return value; + } + + public TColumnValue setValue(byte[] value) { + setValue(value == null ? 
(ByteBuffer)null : ByteBuffer.wrap(value)); + return this; + } + + public TColumnValue setValue(ByteBuffer value) { + this.value = value; + return this; + } + + public void unsetValue() { + this.value = null; + } + + /** Returns true if field value is set (has been assigned a value) and false otherwise */ + public boolean isSetValue() { + return this.value != null; + } + + public void setValueIsSet(boolean value) { + if (!value) { + this.value = null; + } + } + + public long getTimestamp() { + return this.timestamp; + } + + public TColumnValue setTimestamp(long timestamp) { + this.timestamp = timestamp; + setTimestampIsSet(true); + return this; + } + + public void unsetTimestamp() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TIMESTAMP_ISSET_ID); + } + + /** Returns true if field timestamp is set (has been assigned a value) and false otherwise */ + public boolean isSetTimestamp() { + return EncodingUtils.testBit(__isset_bitfield, __TIMESTAMP_ISSET_ID); + } + + public void setTimestampIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TIMESTAMP_ISSET_ID, value); + } + + public byte getType() { + return this.type; + } + + public TColumnValue setType(byte type) { + this.type = type; + setTypeIsSet(true); + return this; + } + + public void unsetType() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TYPE_ISSET_ID); + } + + /** Returns true if field type is set (has been assigned a value) and false otherwise */ + public boolean isSetType() { + return EncodingUtils.testBit(__isset_bitfield, __TYPE_ISSET_ID); + } + + public void setTypeIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TYPE_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case ROW: + if (value == null) { + unsetRow(); + } else { + setRow((ByteBuffer)value); + } + break; + + case FAMILY: + if (value == null) { + unsetFamily(); + } else { + setFamily((ByteBuffer)value); + } + break; + + case QUALIFIER: + if (value == null) { + unsetQualifier(); + } else { + setQualifier((ByteBuffer)value); + } + break; + + case VALUE: + if (value == null) { + unsetValue(); + } else { + setValue((ByteBuffer)value); + } + break; + + case TIMESTAMP: + if (value == null) { + unsetTimestamp(); + } else { + setTimestamp((Long)value); + } + break; + + case TYPE: + if (value == null) { + unsetType(); + } else { + setType((Byte)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case ROW: + return getRow(); + + case FAMILY: + return getFamily(); + + case QUALIFIER: + return getQualifier(); + + case VALUE: + return getValue(); + + case TIMESTAMP: + return Long.valueOf(getTimestamp()); + + case TYPE: + return Byte.valueOf(getType()); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case ROW: + return isSetRow(); + case FAMILY: + return isSetFamily(); + case QUALIFIER: + return isSetQualifier(); + case VALUE: + return isSetValue(); + case TIMESTAMP: + return isSetTimestamp(); + case TYPE: + return isSetType(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TColumnValue) + return this.equals((TColumnValue)that); 
+ return false; + } + + public boolean equals(TColumnValue that) { + if (that == null) + return false; + + boolean this_present_row = true && this.isSetRow(); + boolean that_present_row = true && that.isSetRow(); + if (this_present_row || that_present_row) { + if (!(this_present_row && that_present_row)) + return false; + if (!this.row.equals(that.row)) + return false; + } + + boolean this_present_family = true && this.isSetFamily(); + boolean that_present_family = true && that.isSetFamily(); + if (this_present_family || that_present_family) { + if (!(this_present_family && that_present_family)) + return false; + if (!this.family.equals(that.family)) + return false; + } + + boolean this_present_qualifier = true && this.isSetQualifier(); + boolean that_present_qualifier = true && that.isSetQualifier(); + if (this_present_qualifier || that_present_qualifier) { + if (!(this_present_qualifier && that_present_qualifier)) + return false; + if (!this.qualifier.equals(that.qualifier)) + return false; + } + + boolean this_present_value = true && this.isSetValue(); + boolean that_present_value = true && that.isSetValue(); + if (this_present_value || that_present_value) { + if (!(this_present_value && that_present_value)) + return false; + if (!this.value.equals(that.value)) + return false; + } + + boolean this_present_timestamp = true && this.isSetTimestamp(); + boolean that_present_timestamp = true && that.isSetTimestamp(); + if (this_present_timestamp || that_present_timestamp) { + if (!(this_present_timestamp && that_present_timestamp)) + return false; + if (this.timestamp != that.timestamp) + return false; + } + + boolean this_present_type = true; + boolean that_present_type = true; + if (this_present_type || that_present_type) { + if (!(this_present_type && that_present_type)) + return false; + if (this.type != that.type) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public int compareTo(TColumnValue other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetRow()).compareTo(other.isSetRow()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRow()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.row, other.row); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetFamily()).compareTo(other.isSetFamily()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFamily()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.family, other.family); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetQualifier()).compareTo(other.isSetQualifier()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetQualifier()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.qualifier, other.qualifier); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValue()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTimestamp()).compareTo(other.isSetTimestamp()); + if (lastComparison != 0) { + return lastComparison; + } + if 
(isSetTimestamp()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.timestamp, other.timestamp); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetType()).compareTo(other.isSetType()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetType()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, other.type); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TColumnValue("); + boolean first = true; + + sb.append("row:"); + if (this.row == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.row, sb); + } + first = false; + if (!first) sb.append(", "); + sb.append("family:"); + if (this.family == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.family, sb); + } + first = false; + if (!first) sb.append(", "); + sb.append("qualifier:"); + if (this.qualifier == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.qualifier, sb); + } + first = false; + if (!first) sb.append(", "); + sb.append("value:"); + if (this.value == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.value, sb); + } + first = false; + if (isSetTimestamp()) { + if (!first) sb.append(", "); + sb.append("timestamp:"); + sb.append(this.timestamp); + first = false; + } + if (!first) sb.append(", "); + sb.append("type:"); + sb.append(this.type); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (row == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'row' was not present! Struct: " + toString()); + } + if (family == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'family' was not present! Struct: " + toString()); + } + if (qualifier == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'qualifier' was not present! Struct: " + toString()); + } + if (value == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'value' was not present! Struct: " + toString()); + } + // alas, we cannot check 'type' because it's a primitive and you chose the non-beans generator. + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TColumnValueStandardSchemeFactory implements SchemeFactory { + public TColumnValueStandardScheme getScheme() { + return new TColumnValueStandardScheme(); + } + } + + private static class TColumnValueStandardScheme extends StandardScheme<TColumnValue> { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnValue struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // ROW + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.row = iprot.readBinary(); + struct.setRowIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // FAMILY + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.family = iprot.readBinary(); + struct.setFamilyIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // QUALIFIER + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.qualifier = iprot.readBinary(); + struct.setQualifierIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // VALUE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.value = iprot.readBinary(); + struct.setValueIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // TIMESTAMP + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.timestamp = iprot.readI64(); + struct.setTimestampIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // TYPE + if (schemeField.type == org.apache.thrift.protocol.TType.BYTE) { + struct.type = iprot.readByte(); + struct.setTypeIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + if (!struct.isSetType()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'type' was not found in serialized data!
Struct: " + toString()); + } + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TColumnValue struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.row != null) { + oprot.writeFieldBegin(ROW_FIELD_DESC); + oprot.writeBinary(struct.row); + oprot.writeFieldEnd(); + } + if (struct.family != null) { + oprot.writeFieldBegin(FAMILY_FIELD_DESC); + oprot.writeBinary(struct.family); + oprot.writeFieldEnd(); + } + if (struct.qualifier != null) { + oprot.writeFieldBegin(QUALIFIER_FIELD_DESC); + oprot.writeBinary(struct.qualifier); + oprot.writeFieldEnd(); + } + if (struct.value != null) { + oprot.writeFieldBegin(VALUE_FIELD_DESC); + oprot.writeBinary(struct.value); + oprot.writeFieldEnd(); + } + if (struct.isSetTimestamp()) { + oprot.writeFieldBegin(TIMESTAMP_FIELD_DESC); + oprot.writeI64(struct.timestamp); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(TYPE_FIELD_DESC); + oprot.writeByte(struct.type); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TColumnValueTupleSchemeFactory implements SchemeFactory { + public TColumnValueTupleScheme getScheme() { + return new TColumnValueTupleScheme(); + } + } + + private static class TColumnValueTupleScheme extends TupleScheme<TColumnValue> { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TColumnValue struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeBinary(struct.row); + oprot.writeBinary(struct.family); + oprot.writeBinary(struct.qualifier); + oprot.writeBinary(struct.value); + oprot.writeByte(struct.type); + BitSet optionals = new BitSet(); + if (struct.isSetTimestamp()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetTimestamp()) { + oprot.writeI64(struct.timestamp); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TColumnValue struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.row = iprot.readBinary(); + struct.setRowIsSet(true); + struct.family = iprot.readBinary(); + struct.setFamilyIsSet(true); + struct.qualifier = iprot.readBinary(); + struct.setQualifierIsSet(true); + struct.value = iprot.readBinary(); + struct.setValueIsSet(true); + struct.type = iprot.readByte(); + struct.setTypeIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.timestamp = iprot.readI64(); + struct.setTimestampIsSet(true); + } + } + } + +} + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TEdit.java new file mode 100644 index 0000000..6c3e25d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TEdit.java @@ -0,0 +1,637 @@ +/** + * Autogenerated by Thrift Compiler (0.9.1) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hbase.replication.thrift.generated; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import
org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class TEdit implements org.apache.thrift.TBase<TEdit, TEdit._Fields>, java.io.Serializable, Cloneable, Comparable<TEdit> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TEdit"); + + private static final org.apache.thrift.protocol.TField H_LOG_KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("hLogKey", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField WAL_EDIT_FIELD_DESC = new org.apache.thrift.protocol.TField("walEdit", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField CLUSTER_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("clusterIds", org.apache.thrift.protocol.TType.LIST, (short)3); + + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TEditStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TEditTupleSchemeFactory()); + } + + public THLogKey hLogKey; // required + public TWalLEdit walEdit; // required + public List<TClusterId> clusterIds; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + H_LOG_KEY((short)1, "hLogKey"), + WAL_EDIT((short)2, "walEdit"), + CLUSTER_IDS((short)3, "clusterIds"); + + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // H_LOG_KEY + return H_LOG_KEY; + case 2: // WAL_EDIT + return WAL_EDIT; + case 3: // CLUSTER_IDS + return CLUSTER_IDS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found.
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.H_LOG_KEY, new org.apache.thrift.meta_data.FieldMetaData("hLogKey", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, THLogKey.class))); + tmpMap.put(_Fields.WAL_EDIT, new org.apache.thrift.meta_data.FieldMetaData("walEdit", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TWalLEdit.class))); + tmpMap.put(_Fields.CLUSTER_IDS, new org.apache.thrift.meta_data.FieldMetaData("clusterIds", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TClusterId.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TEdit.class, metaDataMap); + } + + public TEdit() { + } + + public TEdit( + THLogKey hLogKey, + TWalLEdit walEdit, + List<TClusterId> clusterIds) + { + this(); + this.hLogKey = hLogKey; + this.walEdit = walEdit; + this.clusterIds = clusterIds; + } + + /** + * Performs a deep copy on other.
+ */ + public TEdit(TEdit other) { + if (other.isSetHLogKey()) { + this.hLogKey = new THLogKey(other.hLogKey); + } + if (other.isSetWalEdit()) { + this.walEdit = new TWalLEdit(other.walEdit); + } + if (other.isSetClusterIds()) { + List<TClusterId> __this__clusterIds = new ArrayList<TClusterId>(other.clusterIds.size()); + for (TClusterId other_element : other.clusterIds) { + __this__clusterIds.add(new TClusterId(other_element)); + } + this.clusterIds = __this__clusterIds; + } + } + + public TEdit deepCopy() { + return new TEdit(this); + } + + @Override + public void clear() { + this.hLogKey = null; + this.walEdit = null; + this.clusterIds = null; + } + + public THLogKey getHLogKey() { + return this.hLogKey; + } + + public TEdit setHLogKey(THLogKey hLogKey) { + this.hLogKey = hLogKey; + return this; + } + + public void unsetHLogKey() { + this.hLogKey = null; + } + + /** Returns true if field hLogKey is set (has been assigned a value) and false otherwise */ + public boolean isSetHLogKey() { + return this.hLogKey != null; + } + + public void setHLogKeyIsSet(boolean value) { + if (!value) { + this.hLogKey = null; + } + } + + public TWalLEdit getWalEdit() { + return this.walEdit; + } + + public TEdit setWalEdit(TWalLEdit walEdit) { + this.walEdit = walEdit; + return this; + } + + public void unsetWalEdit() { + this.walEdit = null; + } + + /** Returns true if field walEdit is set (has been assigned a value) and false otherwise */ + public boolean isSetWalEdit() { + return this.walEdit != null; + } + + public void setWalEditIsSet(boolean value) { + if (!value) { + this.walEdit = null; + } + } + + public int getClusterIdsSize() { + return (this.clusterIds == null) ? 0 : this.clusterIds.size(); + } + + public java.util.Iterator<TClusterId> getClusterIdsIterator() { + return (this.clusterIds == null) ?
null : this.clusterIds.iterator(); + } + + public void addToClusterIds(TClusterId elem) { + if (this.clusterIds == null) { + this.clusterIds = new ArrayList<TClusterId>(); + } + this.clusterIds.add(elem); + } + + public List<TClusterId> getClusterIds() { + return this.clusterIds; + } + + public TEdit setClusterIds(List<TClusterId> clusterIds) { + this.clusterIds = clusterIds; + return this; + } + + public void unsetClusterIds() { + this.clusterIds = null; + } + + /** Returns true if field clusterIds is set (has been assigned a value) and false otherwise */ + public boolean isSetClusterIds() { + return this.clusterIds != null; + } + + public void setClusterIdsIsSet(boolean value) { + if (!value) { + this.clusterIds = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case H_LOG_KEY: + if (value == null) { + unsetHLogKey(); + } else { + setHLogKey((THLogKey)value); + } + break; + + case WAL_EDIT: + if (value == null) { + unsetWalEdit(); + } else { + setWalEdit((TWalLEdit)value); + } + break; + + case CLUSTER_IDS: + if (value == null) { + unsetClusterIds(); + } else { + setClusterIds((List<TClusterId>)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case H_LOG_KEY: + return getHLogKey(); + + case WAL_EDIT: + return getWalEdit(); + + case CLUSTER_IDS: + return getClusterIds(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case H_LOG_KEY: + return isSetHLogKey(); + case WAL_EDIT: + return isSetWalEdit(); + case CLUSTER_IDS: + return isSetClusterIds(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TEdit) + return this.equals((TEdit)that); + return false; + } + + public boolean equals(TEdit that) { + if (that == null) + return false; + + boolean this_present_hLogKey = true && this.isSetHLogKey(); + boolean that_present_hLogKey = true && that.isSetHLogKey(); + if (this_present_hLogKey || that_present_hLogKey) { + if (!(this_present_hLogKey && that_present_hLogKey)) + return false; + if (!this.hLogKey.equals(that.hLogKey)) + return false; + } + + boolean this_present_walEdit = true && this.isSetWalEdit(); + boolean that_present_walEdit = true && that.isSetWalEdit(); + if (this_present_walEdit || that_present_walEdit) { + if (!(this_present_walEdit && that_present_walEdit)) + return false; + if (!this.walEdit.equals(that.walEdit)) + return false; + } + + boolean this_present_clusterIds = true && this.isSetClusterIds(); + boolean that_present_clusterIds = true && that.isSetClusterIds(); + if (this_present_clusterIds || that_present_clusterIds) { + if (!(this_present_clusterIds && that_present_clusterIds)) + return false; + if (!this.clusterIds.equals(that.clusterIds)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public int compareTo(TEdit other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetHLogKey()).compareTo(other.isSetHLogKey()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetHLogKey()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.hLogKey,
other.hLogKey); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWalEdit()).compareTo(other.isSetWalEdit()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWalEdit()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.walEdit, other.walEdit); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetClusterIds()).compareTo(other.isSetClusterIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetClusterIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.clusterIds, other.clusterIds); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TEdit("); + boolean first = true; + + sb.append("hLogKey:"); + if (this.hLogKey == null) { + sb.append("null"); + } else { + sb.append(this.hLogKey); + } + first = false; + if (!first) sb.append(", "); + sb.append("walEdit:"); + if (this.walEdit == null) { + sb.append("null"); + } else { + sb.append(this.walEdit); + } + first = false; + if (!first) sb.append(", "); + sb.append("clusterIds:"); + if (this.clusterIds == null) { + sb.append("null"); + } else { + sb.append(this.clusterIds); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (hLogKey == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'hLogKey' was not present! Struct: " + toString()); + } + if (walEdit == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'walEdit' was not present! Struct: " + toString()); + } + if (clusterIds == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'clusterIds' was not present! 
Struct: " + toString()); + } + // check for sub-struct validity + if (hLogKey != null) { + hLogKey.validate(); + } + if (walEdit != null) { + walEdit.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TEditStandardSchemeFactory implements SchemeFactory { + public TEditStandardScheme getScheme() { + return new TEditStandardScheme(); + } + } + + private static class TEditStandardScheme extends StandardScheme<TEdit> { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TEdit struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // H_LOG_KEY + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.hLogKey = new THLogKey(); + struct.hLogKey.read(iprot); + struct.setHLogKeyIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // WAL_EDIT + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.walEdit = new TWalLEdit(); + struct.walEdit.read(iprot); + struct.setWalEditIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // CLUSTER_IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list8 = iprot.readListBegin(); + struct.clusterIds = new ArrayList<TClusterId>(_list8.size); + for (int _i9 = 0; _i9 < _list8.size; ++_i9) + { + TClusterId _elem10; + _elem10 = new TClusterId(); + _elem10.read(iprot); + struct.clusterIds.add(_elem10); + } + iprot.readListEnd(); + } + struct.setClusterIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TEdit struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.hLogKey != null) { + oprot.writeFieldBegin(H_LOG_KEY_FIELD_DESC); + struct.hLogKey.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.walEdit != null) { + oprot.writeFieldBegin(WAL_EDIT_FIELD_DESC); + struct.walEdit.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.clusterIds != null) { + oprot.writeFieldBegin(CLUSTER_IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.clusterIds.size())); + for (TClusterId _iter11 : struct.clusterIds) + { + _iter11.write(oprot); + } + oprot.writeListEnd(); + } +
oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TEditTupleSchemeFactory implements SchemeFactory { + public TEditTupleScheme getScheme() { + return new TEditTupleScheme(); + } + } + + private static class TEditTupleScheme extends TupleScheme<TEdit> { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TEdit struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + struct.hLogKey.write(oprot); + struct.walEdit.write(oprot); + { + oprot.writeI32(struct.clusterIds.size()); + for (TClusterId _iter12 : struct.clusterIds) + { + _iter12.write(oprot); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TEdit struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.hLogKey = new THLogKey(); + struct.hLogKey.read(iprot); + struct.setHLogKeyIsSet(true); + struct.walEdit = new TWalLEdit(); + struct.walEdit.read(iprot); + struct.setWalEditIsSet(true); + { + org.apache.thrift.protocol.TList _list13 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.clusterIds = new ArrayList<TClusterId>(_list13.size); + for (int _i14 = 0; _i14 < _list13.size; ++_i14) + { + TClusterId _elem15; + _elem15 = new TClusterId(); + _elem15.read(iprot); + struct.clusterIds.add(_elem15); + } + } + struct.setClusterIdsIsSet(true); + } + } + +} + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/THBaseService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/THBaseService.java new file mode 100644 index 0000000..6e4f633 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/THBaseService.java @@ -0,0 +1,2310 @@ +/** + * Autogenerated by Thrift Compiler (0.9.1) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hbase.replication.thrift.generated; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class THBaseService { + + public interface Iface { + + public void replicate(TBatchEdit edits) throws TIOError, org.apache.thrift.TException; + + public void ping() throws org.apache.thrift.TException; + + public String getClusterUUID() throws org.apache.thrift.TException; + + } + + public interface AsyncIface { + + public void replicate(TBatchEdit edits, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void ping(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void
getClusterUUID(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + } + + public static class Client extends org.apache.thrift.TServiceClient implements Iface { + public static class Factory implements org.apache.thrift.TServiceClientFactory<Client> { + public Factory() {} + public Client getClient(org.apache.thrift.protocol.TProtocol prot) { + return new Client(prot); + } + public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { + return new Client(iprot, oprot); + } + } + + public Client(org.apache.thrift.protocol.TProtocol prot) + { + super(prot, prot); + } + + public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { + super(iprot, oprot); + } + + public void replicate(TBatchEdit edits) throws TIOError, org.apache.thrift.TException + { + send_replicate(edits); + recv_replicate(); + } + + public void send_replicate(TBatchEdit edits) throws org.apache.thrift.TException + { + replicate_args args = new replicate_args(); + args.setEdits(edits); + sendBase("replicate", args); + } + + public void recv_replicate() throws TIOError, org.apache.thrift.TException + { + replicate_result result = new replicate_result(); + receiveBase(result, "replicate"); + if (result.io != null) { + throw result.io; + } + return; + } + + public void ping() throws org.apache.thrift.TException + { + send_ping(); + recv_ping(); + } + + public void send_ping() throws org.apache.thrift.TException + { + ping_args args = new ping_args(); + sendBase("ping", args); + } + + public void recv_ping() throws org.apache.thrift.TException + { + ping_result result = new ping_result(); + receiveBase(result, "ping"); + return; + } + + public String getClusterUUID() throws org.apache.thrift.TException + { + send_getClusterUUID(); + return recv_getClusterUUID(); + } + + public void send_getClusterUUID() throws org.apache.thrift.TException + { + getClusterUUID_args args = new getClusterUUID_args(); + sendBase("getClusterUUID", args); + } + + public String recv_getClusterUUID() throws org.apache.thrift.TException + { + getClusterUUID_result result = new getClusterUUID_result(); + receiveBase(result, "getClusterUUID"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getClusterUUID failed: unknown result"); + } + + } + public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface { + public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> { + private org.apache.thrift.async.TAsyncClientManager clientManager; + private org.apache.thrift.protocol.TProtocolFactory protocolFactory; + public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) { + this.clientManager = clientManager; + this.protocolFactory = protocolFactory; + } + public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) { + return new AsyncClient(protocolFactory, clientManager, transport); + } + } + + public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) { + super(protocolFactory, clientManager, transport); + } + + public void replicate(TBatchEdit edits, org.apache.thrift.async.AsyncMethodCallback
resultHandler) throws org.apache.thrift.TException { + checkReady(); + replicate_call method_call = new replicate_call(edits, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class replicate_call extends org.apache.thrift.async.TAsyncMethodCall { + private TBatchEdit edits; + public replicate_call(TBatchEdit edits, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.edits = edits; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("replicate", org.apache.thrift.protocol.TMessageType.CALL, 0)); + replicate_args args = new replicate_args(); + args.setEdits(edits); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws TIOError, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_replicate(); + } + } + + public void ping(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + ping_call method_call = new ping_call(resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class ping_call extends org.apache.thrift.async.TAsyncMethodCall { + public ping_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("ping", org.apache.thrift.protocol.TMessageType.CALL, 0)); + ping_args args = new ping_args(); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_ping(); + } + } + + public void getClusterUUID(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + getClusterUUID_call method_call = new getClusterUUID_call(resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + 
___manager.call(method_call); + } + + public static class getClusterUUID_call extends org.apache.thrift.async.TAsyncMethodCall { + public getClusterUUID_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getClusterUUID", org.apache.thrift.protocol.TMessageType.CALL, 0)); + getClusterUUID_args args = new getClusterUUID_args(); + args.write(prot); + prot.writeMessageEnd(); + } + + public String getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_getClusterUUID(); + } + } + + } + + public static class Processor<I extends Iface> extends org.apache.thrift.TBaseProcessor<I> implements org.apache.thrift.TProcessor { + private static final Logger LOGGER = LoggerFactory.getLogger(Processor.class.getName()); + public Processor(I iface) { + super(iface, getProcessMap(new HashMap<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>())); + } + + protected Processor(I iface, Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) { + super(iface, getProcessMap(processMap)); + } + + private static <I extends Iface> Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> getProcessMap(Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) { + processMap.put("replicate", new replicate()); + processMap.put("ping", new ping()); + processMap.put("getClusterUUID", new getClusterUUID()); + return processMap; + } + + public static class replicate<I extends Iface> extends org.apache.thrift.ProcessFunction<I, replicate_args> { + public replicate() { + super("replicate"); + } + + public replicate_args getEmptyArgsInstance() { + return new replicate_args(); + } + + protected boolean isOneway() { + return false; + } + + public replicate_result getResult(I iface, replicate_args args) throws org.apache.thrift.TException { + replicate_result result = new replicate_result(); + try { + iface.replicate(args.edits); + } catch (TIOError io) { + result.io = io; + } + return result; + } + } + + public static class ping<I extends Iface> extends org.apache.thrift.ProcessFunction<I, ping_args> { + public ping() { + super("ping"); + } + + public ping_args getEmptyArgsInstance() { + return new ping_args(); + } + + protected boolean isOneway() { + return false; + } + + public ping_result getResult(I iface, ping_args args) throws org.apache.thrift.TException { + ping_result result = new ping_result(); + iface.ping(); + return result; + } + } + + public static class getClusterUUID<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getClusterUUID_args> { + public getClusterUUID() { + super("getClusterUUID"); + } + + public getClusterUUID_args getEmptyArgsInstance() { + return new getClusterUUID_args(); + } + + protected boolean isOneway() { + return false; + } + + public getClusterUUID_result getResult(I iface, getClusterUUID_args args) throws org.apache.thrift.TException { + getClusterUUID_result result = new getClusterUUID_result(); + result.success = iface.getClusterUUID(); + return result; + } + } + + } + + public static class AsyncProcessor<I extends AsyncIface> extends
org.apache.thrift.TBaseAsyncProcessor<I> { + private static final Logger LOGGER = LoggerFactory.getLogger(AsyncProcessor.class.getName()); + public AsyncProcessor(I iface) { + super(iface, getProcessMap(new HashMap<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>>())); + } + + protected AsyncProcessor(I iface, Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) { + super(iface, getProcessMap(processMap)); + } + + private static <I extends AsyncIface> Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> getProcessMap(Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) { + processMap.put("replicate", new replicate()); + processMap.put("ping", new ping()); + processMap.put("getClusterUUID", new getClusterUUID()); + return processMap; + } + + public static class replicate<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, replicate_args, Void> { + public replicate() { + super("replicate"); + } + + public replicate_args getEmptyArgsInstance() { + return new replicate_args(); + } + + public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback<Void>() { + public void onComplete(Void o) { + replicate_result result = new replicate_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + replicate_result result = new replicate_result(); + if (e instanceof TIOError) { + result.io = (TIOError) e; + result.setIoIsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, replicate_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException { + iface.replicate(args.edits,resultHandler); + } + } + + public static class ping<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, ping_args, Void> { + public ping() { + super("ping"); + } + + public ping_args getEmptyArgsInstance() { + return new ping_args(); + } + + public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback<Void>() { + public void onComplete(Void o) { + ping_result result = new ping_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + ping_result result = new ping_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface,
ping_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException { + iface.ping(resultHandler); + } + } + + public static class getClusterUUID<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, getClusterUUID_args, String> { + public getClusterUUID() { + super("getClusterUUID"); + } + + public getClusterUUID_args getEmptyArgsInstance() { + return new getClusterUUID_args(); + } + + public AsyncMethodCallback<String> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback<String>() { + public void onComplete(String o) { + getClusterUUID_result result = new getClusterUUID_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + getClusterUUID_result result = new getClusterUUID_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, getClusterUUID_args args, org.apache.thrift.async.AsyncMethodCallback<String> resultHandler) throws TException { + iface.getClusterUUID(resultHandler); + } + } + + } + + public static class replicate_args implements org.apache.thrift.TBase<replicate_args, replicate_args._Fields>, java.io.Serializable, Cloneable, Comparable<replicate_args> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("replicate_args"); + + private static final org.apache.thrift.protocol.TField EDITS_FIELD_DESC = new org.apache.thrift.protocol.TField("edits", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new replicate_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new replicate_argsTupleSchemeFactory()); + } + + public TBatchEdit edits; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + EDITS((short)1, "edits"); + + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // EDITS + return EDITS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found.
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.EDITS, new org.apache.thrift.meta_data.FieldMetaData("edits", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TBatchEdit.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(replicate_args.class, metaDataMap); + } + + public replicate_args() { + } + + public replicate_args( + TBatchEdit edits) + { + this(); + this.edits = edits; + } + + /** + * Performs a deep copy on other. + */ + public replicate_args(replicate_args other) { + if (other.isSetEdits()) { + this.edits = new TBatchEdit(other.edits); + } + } + + public replicate_args deepCopy() { + return new replicate_args(this); + } + + @Override + public void clear() { + this.edits = null; + } + + public TBatchEdit getEdits() { + return this.edits; + } + + public replicate_args setEdits(TBatchEdit edits) { + this.edits = edits; + return this; + } + + public void unsetEdits() { + this.edits = null; + } + + /** Returns true if field edits is set (has been assigned a value) and false otherwise */ + public boolean isSetEdits() { + return this.edits != null; + } + + public void setEditsIsSet(boolean value) { + if (!value) { + this.edits = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case EDITS: + if (value == null) { + unsetEdits(); + } else { + setEdits((TBatchEdit)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case EDITS: + return getEdits(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case EDITS: + return isSetEdits(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof replicate_args) + return this.equals((replicate_args)that); + return false; + } + + public boolean equals(replicate_args that) { + if (that == null) + return false; + + boolean this_present_edits = true && this.isSetEdits(); + boolean that_present_edits = true && that.isSetEdits(); + if (this_present_edits || that_present_edits) { + if (!(this_present_edits && that_present_edits)) + return false; + if (!this.edits.equals(that.edits)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public int compareTo(replicate_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = 
Boolean.valueOf(isSetEdits()).compareTo(other.isSetEdits()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEdits()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.edits, other.edits); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("replicate_args("); + boolean first = true; + + sb.append("edits:"); + if (this.edits == null) { + sb.append("null"); + } else { + sb.append(this.edits); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (edits == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'edits' was not present! Struct: " + toString()); + } + // check for sub-struct validity + if (edits != null) { + edits.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class replicate_argsStandardSchemeFactory implements SchemeFactory { + public replicate_argsStandardScheme getScheme() { + return new replicate_argsStandardScheme(); + } + } + + private static class replicate_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, replicate_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // EDITS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.edits = new TBatchEdit(); + struct.edits.read(iprot); + struct.setEditsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, replicate_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.edits != null) { + oprot.writeFieldBegin(EDITS_FIELD_DESC); + struct.edits.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + 
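+ // the STOP byte written by writeFieldStop() terminates the field list; the
+ // read loop above exits on the matching TType.STOP, so the encoding is
+ // self-delimiting without a length prefix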
oprot.writeStructEnd(); + } + + } + + private static class replicate_argsTupleSchemeFactory implements SchemeFactory { + public replicate_argsTupleScheme getScheme() { + return new replicate_argsTupleScheme(); + } + } + + private static class replicate_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, replicate_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + struct.edits.write(oprot); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, replicate_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.edits = new TBatchEdit(); + struct.edits.read(iprot); + struct.setEditsIsSet(true); + } + } + + } + + public static class replicate_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("replicate_result"); + + private static final org.apache.thrift.protocol.TField IO_FIELD_DESC = new org.apache.thrift.protocol.TField("io", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new replicate_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new replicate_resultTupleSchemeFactory()); + } + + public TIOError io; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + IO((short)1, "io"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // IO + return IO; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
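+ * (replicate is declared void, so no field id 0 "success" entry exists here;
+ * the struct carries only the declared TIOError under id 1.)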
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.IO, new org.apache.thrift.meta_data.FieldMetaData("io", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(replicate_result.class, metaDataMap); + } + + public replicate_result() { + } + + public replicate_result( + TIOError io) + { + this(); + this.io = io; + } + + /** + * Performs a deep copy on other. + */ + public replicate_result(replicate_result other) { + if (other.isSetIo()) { + this.io = new TIOError(other.io); + } + } + + public replicate_result deepCopy() { + return new replicate_result(this); + } + + @Override + public void clear() { + this.io = null; + } + + public TIOError getIo() { + return this.io; + } + + public replicate_result setIo(TIOError io) { + this.io = io; + return this; + } + + public void unsetIo() { + this.io = null; + } + + /** Returns true if field io is set (has been assigned a value) and false otherwise */ + public boolean isSetIo() { + return this.io != null; + } + + public void setIoIsSet(boolean value) { + if (!value) { + this.io = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case IO: + if (value == null) { + unsetIo(); + } else { + setIo((TIOError)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case IO: + return getIo(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case IO: + return isSetIo(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof replicate_result) + return this.equals((replicate_result)that); + return false; + } + + public boolean equals(replicate_result that) { + if (that == null) + return false; + + boolean this_present_io = true && this.isSetIo(); + boolean that_present_io = true && that.isSetIo(); + if (this_present_io || that_present_io) { + if (!(this_present_io && that_present_io)) + return false; + if (!this.io.equals(that.io)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public int compareTo(replicate_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetIo()).compareTo(other.isSetIo()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIo()) { + lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.io, other.io); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("replicate_result("); + boolean first = true; + + sb.append("io:"); + if (this.io == null) { + sb.append("null"); + } else { + sb.append(this.io); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class replicate_resultStandardSchemeFactory implements SchemeFactory { + public replicate_resultStandardScheme getScheme() { + return new replicate_resultStandardScheme(); + } + } + + private static class replicate_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, replicate_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // IO + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.io = new TIOError(); + struct.io.read(iprot); + struct.setIoIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, replicate_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.io != null) { + oprot.writeFieldBegin(IO_FIELD_DESC); + struct.io.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class replicate_resultTupleSchemeFactory implements SchemeFactory { + public replicate_resultTupleScheme getScheme() { + return new replicate_resultTupleScheme(); + } + } + + private static class replicate_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, replicate_result struct) 
throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetIo()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetIo()) { + struct.io.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, replicate_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.io = new TIOError(); + struct.io.read(iprot); + struct.setIoIsSet(true); + } + } + } + + } + + public static class ping_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ping_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new ping_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new ping_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ping_args.class, metaDataMap); + } + + public ping_args() { + } + + /** + * Performs a deep copy on other. 
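+ * ping_args declares no fields, so there is nothing to copy; on the wire the
+ * struct serializes as little more than a field-stop byte.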
+ */ + public ping_args(ping_args other) { + } + + public ping_args deepCopy() { + return new ping_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof ping_args) + return this.equals((ping_args)that); + return false; + } + + public boolean equals(ping_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public int compareTo(ping_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("ping_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class ping_argsStandardSchemeFactory implements SchemeFactory { + public ping_argsStandardScheme getScheme() { + return new ping_argsStandardScheme(); + } + } + + private static class ping_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, ping_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, ping_args struct) throws 
org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class ping_argsTupleSchemeFactory implements SchemeFactory { + public ping_argsTupleScheme getScheme() { + return new ping_argsTupleScheme(); + } + } + + private static class ping_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, ping_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, ping_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class ping_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ping_result"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new ping_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new ping_resultTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ping_result.class, metaDataMap); + } + + public ping_result() { + } + + /** + * Performs a deep copy on other. 
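+ * A _result struct is exchanged even for a void method, which lets the caller
+ * block until the server acknowledges; only oneway methods (isOneway() == true)
+ * skip the reply.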
+ */ + public ping_result(ping_result other) { + } + + public ping_result deepCopy() { + return new ping_result(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof ping_result) + return this.equals((ping_result)that); + return false; + } + + public boolean equals(ping_result that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public int compareTo(ping_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("ping_result("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class ping_resultStandardSchemeFactory implements SchemeFactory { + public ping_resultStandardScheme getScheme() { + return new ping_resultStandardScheme(); + } + } + + private static class ping_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, ping_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, 
ping_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class ping_resultTupleSchemeFactory implements SchemeFactory { + public ping_resultTupleScheme getScheme() { + return new ping_resultTupleScheme(); + } + } + + private static class ping_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, ping_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, ping_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class getClusterUUID_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getClusterUUID_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new getClusterUUID_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getClusterUUID_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterUUID_args.class, metaDataMap); + } + + public getClusterUUID_args() { + } + + /** + * Performs a deep copy on other. 
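+ * getClusterUUID takes no arguments. In HBase replication a peer's cluster
+ * UUID is compared against the UUIDs already stamped on an edit so that an
+ * edit is never shipped back to a cluster it has visited; this RPC presumably
+ * serves that loop-prevention role for master-master topologies.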
+ */ + public getClusterUUID_args(getClusterUUID_args other) { + } + + public getClusterUUID_args deepCopy() { + return new getClusterUUID_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof getClusterUUID_args) + return this.equals((getClusterUUID_args)that); + return false; + } + + public boolean equals(getClusterUUID_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public int compareTo(getClusterUUID_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("getClusterUUID_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getClusterUUID_argsStandardSchemeFactory implements SchemeFactory { + public getClusterUUID_argsStandardScheme getScheme() { + return new getClusterUUID_argsStandardScheme(); + } + } + + private static class getClusterUUID_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterUUID_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the 
validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterUUID_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getClusterUUID_argsTupleSchemeFactory implements SchemeFactory { + public getClusterUUID_argsTupleScheme getScheme() { + return new getClusterUUID_argsTupleScheme(); + } + } + + private static class getClusterUUID_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getClusterUUID_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getClusterUUID_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class getClusterUUID_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getClusterUUID_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new getClusterUUID_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getClusterUUID_resultTupleSchemeFactory()); + } + + public String success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
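+ * Field id 0 is reserved by Thrift for a method's return value, hence
+ * SUCCESS((short)0) above. A caller-side sketch (the transport/protocol
+ * wiring is illustrative, not prescribed by this file):
+ * <pre>
+ * THBaseService.Client client = new THBaseService.Client(protocol);
+ * String uuid = client.getClusterUUID();
+ * </pre>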
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterUUID_result.class, metaDataMap); + } + + public getClusterUUID_result() { + } + + public getClusterUUID_result( + String success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. + */ + public getClusterUUID_result(getClusterUUID_result other) { + if (other.isSetSuccess()) { + this.success = other.success; + } + } + + public getClusterUUID_result deepCopy() { + return new getClusterUUID_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public String getSuccess() { + return this.success; + } + + public getClusterUUID_result setSuccess(String success) { + this.success = success; + return this; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof getClusterUUID_result) + return this.equals((getClusterUUID_result)that); + return false; + } + + public boolean equals(getClusterUUID_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public int compareTo(getClusterUUID_result other) { + if (!getClass().equals(other.getClass())) { + return 
getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("getClusterUUID_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getClusterUUID_resultStandardSchemeFactory implements SchemeFactory { + public getClusterUUID_resultStandardScheme getScheme() { + return new getClusterUUID_resultStandardScheme(); + } + } + + private static class getClusterUUID_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterUUID_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.success = iprot.readString(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterUUID_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeString(struct.success); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private 
static class getClusterUUID_resultTupleSchemeFactory implements SchemeFactory { + public getClusterUUID_resultTupleScheme getScheme() { + return new getClusterUUID_resultTupleScheme(); + } + } + + private static class getClusterUUID_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getClusterUUID_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + oprot.writeString(struct.success); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getClusterUUID_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = iprot.readString(); + struct.setSuccessIsSet(true); + } + } + } + + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/THLogKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/THLogKey.java new file mode 100644 index 0000000..93615a8 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/THLogKey.java @@ -0,0 +1,582 @@ +/** + * Autogenerated by Thrift Compiler (0.9.1) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hbase.replication.thrift.generated; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Mapping for HLogKey + * + */ +public class THLogKey implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("THLogKey"); + + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField WRITE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("writeTime", org.apache.thrift.protocol.TType.I64, (short)2); + private static final org.apache.thrift.protocol.TField SEQ_NUM_FIELD_DESC = new org.apache.thrift.protocol.TField("seqNum", org.apache.thrift.protocol.TType.I64, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new THLogKeyStandardSchemeFactory()); + schemes.put(TupleScheme.class, new THLogKeyTupleSchemeFactory()); + } + + public ByteBuffer 
tableName; // required + public long writeTime; // required + public long seqNum; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TABLE_NAME((short)1, "tableName"), + WRITE_TIME((short)2, "writeTime"), + SEQ_NUM((short)3, "seqNum"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TABLE_NAME + return TABLE_NAME; + case 2: // WRITE_TIME + return WRITE_TIME; + case 3: // SEQ_NUM + return SEQ_NUM; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __WRITETIME_ISSET_ID = 0; + private static final int __SEQNUM_ISSET_ID = 1; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); + tmpMap.put(_Fields.WRITE_TIME, new org.apache.thrift.meta_data.FieldMetaData("writeTime", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.SEQ_NUM, new org.apache.thrift.meta_data.FieldMetaData("seqNum", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(THLogKey.class, metaDataMap); + } + + public THLogKey() { + } + + public THLogKey( + ByteBuffer tableName, + long writeTime, + long seqNum) + { + this(); + this.tableName = tableName; + this.writeTime = writeTime; + setWriteTimeIsSet(true); + this.seqNum = seqNum; + setSeqNumIsSet(true); + } + + /** + * Performs a deep copy on other. 
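+ * The __isset_bitfield is copied as well, since presence of the primitive
+ * writeTime/seqNum fields is tracked by bits rather than nullability;
+ * tableName is duplicated via copyBinary so the copy owns its bytes.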
+ */ + public THLogKey(THLogKey other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetTableName()) { + this.tableName = org.apache.thrift.TBaseHelper.copyBinary(other.tableName); +; + } + this.writeTime = other.writeTime; + this.seqNum = other.seqNum; + } + + public THLogKey deepCopy() { + return new THLogKey(this); + } + + @Override + public void clear() { + this.tableName = null; + setWriteTimeIsSet(false); + this.writeTime = 0; + setSeqNumIsSet(false); + this.seqNum = 0; + } + + public byte[] getTableName() { + setTableName(org.apache.thrift.TBaseHelper.rightSize(tableName)); + return tableName == null ? null : tableName.array(); + } + + public ByteBuffer bufferForTableName() { + return tableName; + } + + public THLogKey setTableName(byte[] tableName) { + setTableName(tableName == null ? (ByteBuffer)null : ByteBuffer.wrap(tableName)); + return this; + } + + public THLogKey setTableName(ByteBuffer tableName) { + this.tableName = tableName; + return this; + } + + public void unsetTableName() { + this.tableName = null; + } + + /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ + public boolean isSetTableName() { + return this.tableName != null; + } + + public void setTableNameIsSet(boolean value) { + if (!value) { + this.tableName = null; + } + } + + public long getWriteTime() { + return this.writeTime; + } + + public THLogKey setWriteTime(long writeTime) { + this.writeTime = writeTime; + setWriteTimeIsSet(true); + return this; + } + + public void unsetWriteTime() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITETIME_ISSET_ID); + } + + /** Returns true if field writeTime is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteTime() { + return EncodingUtils.testBit(__isset_bitfield, __WRITETIME_ISSET_ID); + } + + public void setWriteTimeIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITETIME_ISSET_ID, value); + } + + public long getSeqNum() { + return this.seqNum; + } + + public THLogKey setSeqNum(long seqNum) { + this.seqNum = seqNum; + setSeqNumIsSet(true); + return this; + } + + public void unsetSeqNum() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SEQNUM_ISSET_ID); + } + + /** Returns true if field seqNum is set (has been assigned a value) and false otherwise */ + public boolean isSetSeqNum() { + return EncodingUtils.testBit(__isset_bitfield, __SEQNUM_ISSET_ID); + } + + public void setSeqNumIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SEQNUM_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TABLE_NAME: + if (value == null) { + unsetTableName(); + } else { + setTableName((ByteBuffer)value); + } + break; + + case WRITE_TIME: + if (value == null) { + unsetWriteTime(); + } else { + setWriteTime((Long)value); + } + break; + + case SEQ_NUM: + if (value == null) { + unsetSeqNum(); + } else { + setSeqNum((Long)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TABLE_NAME: + return getTableName(); + + case WRITE_TIME: + return Long.valueOf(getWriteTime()); + + case SEQ_NUM: + return Long.valueOf(getSeqNum()); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + 
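+ // presence is tracked per type: tableName (binary) by null check via
+ // isSetTableName(), the primitive writeTime/seqNum fields via their
+ // __isset_bitfield bits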
switch (field) { + case TABLE_NAME: + return isSetTableName(); + case WRITE_TIME: + return isSetWriteTime(); + case SEQ_NUM: + return isSetSeqNum(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof THLogKey) + return this.equals((THLogKey)that); + return false; + } + + public boolean equals(THLogKey that) { + if (that == null) + return false; + + boolean this_present_tableName = true && this.isSetTableName(); + boolean that_present_tableName = true && that.isSetTableName(); + if (this_present_tableName || that_present_tableName) { + if (!(this_present_tableName && that_present_tableName)) + return false; + if (!this.tableName.equals(that.tableName)) + return false; + } + + boolean this_present_writeTime = true; + boolean that_present_writeTime = true; + if (this_present_writeTime || that_present_writeTime) { + if (!(this_present_writeTime && that_present_writeTime)) + return false; + if (this.writeTime != that.writeTime) + return false; + } + + boolean this_present_seqNum = true; + boolean that_present_seqNum = true; + if (this_present_seqNum || that_present_seqNum) { + if (!(this_present_seqNum && that_present_seqNum)) + return false; + if (this.seqNum != that.seqNum) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public int compareTo(THLogKey other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteTime()).compareTo(other.isSetWriteTime()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteTime()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeTime, other.writeTime); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetSeqNum()).compareTo(other.isSetSeqNum()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSeqNum()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.seqNum, other.seqNum); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("THLogKey("); + boolean first = true; + + sb.append("tableName:"); + if (this.tableName == null) { + sb.append("null"); + } else { + org.apache.thrift.TBaseHelper.toString(this.tableName, sb); + } + first = false; + if (!first) sb.append(", "); + sb.append("writeTime:"); + sb.append(this.writeTime); + first = false; + if (!first) sb.append(", "); + sb.append("seqNum:"); + sb.append(this.seqNum); + first = false; + sb.append(")"); + return sb.toString(); + 
} + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (tableName == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' was not present! Struct: " + toString()); + } + // alas, we cannot check 'writeTime' because it's a primitive and you chose the non-beans generator. + // alas, we cannot check 'seqNum' because it's a primitive and you chose the non-beans generator. + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class THLogKeyStandardSchemeFactory implements SchemeFactory { + public THLogKeyStandardScheme getScheme() { + return new THLogKeyStandardScheme(); + } + } + + private static class THLogKeyStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, THLogKey struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tableName = iprot.readBinary(); + struct.setTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // WRITE_TIME + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeTime = iprot.readI64(); + struct.setWriteTimeIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // SEQ_NUM + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.seqNum = iprot.readI64(); + struct.setSeqNumIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + if (!struct.isSetWriteTime()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeTime' was not found in serialized data! Struct: " + toString()); + } + if (!struct.isSetSeqNum()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'seqNum' was not found in serialized data! 
Struct: " + toString()); + } + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, THLogKey struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.tableName != null) { + oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); + oprot.writeBinary(struct.tableName); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(WRITE_TIME_FIELD_DESC); + oprot.writeI64(struct.writeTime); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(SEQ_NUM_FIELD_DESC); + oprot.writeI64(struct.seqNum); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class THLogKeyTupleSchemeFactory implements SchemeFactory { + public THLogKeyTupleScheme getScheme() { + return new THLogKeyTupleScheme(); + } + } + + private static class THLogKeyTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, THLogKey struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeBinary(struct.tableName); + oprot.writeI64(struct.writeTime); + oprot.writeI64(struct.seqNum); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, THLogKey struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.tableName = iprot.readBinary(); + struct.setTableNameIsSet(true); + struct.writeTime = iprot.readI64(); + struct.setWriteTimeIsSet(true); + struct.seqNum = iprot.readI64(); + struct.setSeqNumIsSet(true); + } + } + +} + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TIOError.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TIOError.java new file mode 100644 index 0000000..c2fcca6 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TIOError.java @@ -0,0 +1,391 @@ +/** + * Autogenerated by Thrift Compiler (0.9.1) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hbase.replication.thrift.generated; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A TIOError exception signals that an error occurred communicating + * to the HBase master or a HBase region server. Also used to return + * more general HBase error conditions. 
+ */ +public class TIOError extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIOError"); + + private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TIOErrorStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TIOErrorTupleSchemeFactory()); + } + + public String message; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + MESSAGE((short)1, "message"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // MESSAGE + return MESSAGE; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private _Fields optionals[] = {_Fields.MESSAGE}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TIOError.class, metaDataMap); + } + + public TIOError() { + } + + /** + * Performs a deep copy on other. 
+ */ + public TIOError(TIOError other) { + if (other.isSetMessage()) { + this.message = other.message; + } + } + + public TIOError deepCopy() { + return new TIOError(this); + } + + @Override + public void clear() { + this.message = null; + } + + public String getMessage() { + return this.message; + } + + public TIOError setMessage(String message) { + this.message = message; + return this; + } + + public void unsetMessage() { + this.message = null; + } + + /** Returns true if field message is set (has been assigned a value) and false otherwise */ + public boolean isSetMessage() { + return this.message != null; + } + + public void setMessageIsSet(boolean value) { + if (!value) { + this.message = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case MESSAGE: + if (value == null) { + unsetMessage(); + } else { + setMessage((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case MESSAGE: + return getMessage(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case MESSAGE: + return isSetMessage(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TIOError) + return this.equals((TIOError)that); + return false; + } + + public boolean equals(TIOError that) { + if (that == null) + return false; + + boolean this_present_message = true && this.isSetMessage(); + boolean that_present_message = true && that.isSetMessage(); + if (this_present_message || that_present_message) { + if (!(this_present_message && that_present_message)) + return false; + if (!this.message.equals(that.message)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public int compareTo(TIOError other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMessage()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TIOError("); + boolean first = true; + + if (isSetMessage()) { + sb.append("message:"); + if (this.message == null) { + sb.append("null"); + } else { + sb.append(this.message); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws 
java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TIOErrorStandardSchemeFactory implements SchemeFactory { + public TIOErrorStandardScheme getScheme() { + return new TIOErrorStandardScheme(); + } + } + + private static class TIOErrorStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TIOError struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // MESSAGE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.message = iprot.readString(); + struct.setMessageIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TIOError struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.message != null) { + if (struct.isSetMessage()) { + oprot.writeFieldBegin(MESSAGE_FIELD_DESC); + oprot.writeString(struct.message); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TIOErrorTupleSchemeFactory implements SchemeFactory { + public TIOErrorTupleScheme getScheme() { + return new TIOErrorTupleScheme(); + } + } + + private static class TIOErrorTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TIOError struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetMessage()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetMessage()) { + oprot.writeString(struct.message); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TIOError struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.message = iprot.readString(); + struct.setMessageIsSet(true); + } + } + } + +} + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TIllegalArgument.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TIllegalArgument.java new file mode 100644 index 0000000..ade826f --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TIllegalArgument.java @@ -0,0 +1,390 @@ +/** + * Autogenerated by Thrift Compiler (0.9.1) + * + * DO NOT EDIT UNLESS YOU ARE 
SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hbase.replication.thrift.generated; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A TIllegalArgument exception indicates an illegal or invalid + * argument was passed into a procedure. + */ +public class TIllegalArgument extends TException implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TIllegalArgument"); + + private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TIllegalArgumentStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TIllegalArgumentTupleSchemeFactory()); + } + + public String message; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + MESSAGE((short)1, "message"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // MESSAGE + return MESSAGE; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private _Fields optionals[] = {_Fields.MESSAGE}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TIllegalArgument.class, metaDataMap); + } + + public TIllegalArgument() { + } + + /** + * Performs a deep copy on other. + */ + public TIllegalArgument(TIllegalArgument other) { + if (other.isSetMessage()) { + this.message = other.message; + } + } + + public TIllegalArgument deepCopy() { + return new TIllegalArgument(this); + } + + @Override + public void clear() { + this.message = null; + } + + public String getMessage() { + return this.message; + } + + public TIllegalArgument setMessage(String message) { + this.message = message; + return this; + } + + public void unsetMessage() { + this.message = null; + } + + /** Returns true if field message is set (has been assigned a value) and false otherwise */ + public boolean isSetMessage() { + return this.message != null; + } + + public void setMessageIsSet(boolean value) { + if (!value) { + this.message = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case MESSAGE: + if (value == null) { + unsetMessage(); + } else { + setMessage((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case MESSAGE: + return getMessage(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case MESSAGE: + return isSetMessage(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TIllegalArgument) + return this.equals((TIllegalArgument)that); + return false; + } + + public boolean equals(TIllegalArgument that) { + if (that == null) + return false; + + boolean this_present_message = true && this.isSetMessage(); + boolean that_present_message = true && that.isSetMessage(); + if (this_present_message || that_present_message) { + if (!(this_present_message && that_present_message)) + return false; + if (!this.message.equals(that.message)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public int compareTo(TIllegalArgument other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = 
Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMessage()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TIllegalArgument("); + boolean first = true; + + if (isSetMessage()) { + sb.append("message:"); + if (this.message == null) { + sb.append("null"); + } else { + sb.append(this.message); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TIllegalArgumentStandardSchemeFactory implements SchemeFactory { + public TIllegalArgumentStandardScheme getScheme() { + return new TIllegalArgumentStandardScheme(); + } + } + + private static class TIllegalArgumentStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TIllegalArgument struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // MESSAGE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.message = iprot.readString(); + struct.setMessageIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TIllegalArgument struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.message != null) { + if (struct.isSetMessage()) { + oprot.writeFieldBegin(MESSAGE_FIELD_DESC); + oprot.writeString(struct.message); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TIllegalArgumentTupleSchemeFactory implements SchemeFactory { + public 
TIllegalArgumentTupleScheme getScheme() { + return new TIllegalArgumentTupleScheme(); + } + } + + private static class TIllegalArgumentTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TIllegalArgument struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetMessage()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetMessage()) { + oprot.writeString(struct.message); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TIllegalArgument struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.message = iprot.readString(); + struct.setMessageIsSet(true); + } + } + } + +} + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TWalLEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TWalLEdit.java new file mode 100644 index 0000000..e47e036 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/thrift/generated/TWalLEdit.java @@ -0,0 +1,439 @@ +/** + * Autogenerated by Thrift Compiler (0.9.1) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hbase.replication.thrift.generated; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Mapping for WALEdit + * + */ +public class TWalLEdit implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TWalLEdit"); + + private static final org.apache.thrift.protocol.TField MUTATIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("mutations", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TWalLEditStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TWalLEditTupleSchemeFactory()); + } + + public List mutations; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + MUTATIONS((short)1, "mutations"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // MUTATIONS + return MUTATIONS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.MUTATIONS, new org.apache.thrift.meta_data.FieldMetaData("mutations", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnValue.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TWalLEdit.class, metaDataMap); + } + + public TWalLEdit() { + } + + public TWalLEdit( + List mutations) + { + this(); + this.mutations = mutations; + } + + /** + * Performs a deep copy on other. + */ + public TWalLEdit(TWalLEdit other) { + if (other.isSetMutations()) { + List __this__mutations = new ArrayList(other.mutations.size()); + for (TColumnValue other_element : other.mutations) { + __this__mutations.add(new TColumnValue(other_element)); + } + this.mutations = __this__mutations; + } + } + + public TWalLEdit deepCopy() { + return new TWalLEdit(this); + } + + @Override + public void clear() { + this.mutations = null; + } + + public int getMutationsSize() { + return (this.mutations == null) ? 0 : this.mutations.size(); + } + + public java.util.Iterator getMutationsIterator() { + return (this.mutations == null) ? 
null : this.mutations.iterator(); + } + + public void addToMutations(TColumnValue elem) { + if (this.mutations == null) { + this.mutations = new ArrayList(); + } + this.mutations.add(elem); + } + + public List getMutations() { + return this.mutations; + } + + public TWalLEdit setMutations(List mutations) { + this.mutations = mutations; + return this; + } + + public void unsetMutations() { + this.mutations = null; + } + + /** Returns true if field mutations is set (has been assigned a value) and false otherwise */ + public boolean isSetMutations() { + return this.mutations != null; + } + + public void setMutationsIsSet(boolean value) { + if (!value) { + this.mutations = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case MUTATIONS: + if (value == null) { + unsetMutations(); + } else { + setMutations((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case MUTATIONS: + return getMutations(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case MUTATIONS: + return isSetMutations(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TWalLEdit) + return this.equals((TWalLEdit)that); + return false; + } + + public boolean equals(TWalLEdit that) { + if (that == null) + return false; + + boolean this_present_mutations = true && this.isSetMutations(); + boolean that_present_mutations = true && that.isSetMutations(); + if (this_present_mutations || that_present_mutations) { + if (!(this_present_mutations && that_present_mutations)) + return false; + if (!this.mutations.equals(that.mutations)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public int compareTo(TWalLEdit other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetMutations()).compareTo(other.isSetMutations()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMutations()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mutations, other.mutations); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TWalLEdit("); + boolean first = true; + + sb.append("mutations:"); + if (this.mutations == null) { + sb.append("null"); + } else { + sb.append(this.mutations); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (mutations == null) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 
'mutations' was not present! Struct: " + toString()); + } + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TWalLEditStandardSchemeFactory implements SchemeFactory { + public TWalLEditStandardScheme getScheme() { + return new TWalLEditStandardScheme(); + } + } + + private static class TWalLEditStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TWalLEdit struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // MUTATIONS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list0 = iprot.readListBegin(); + struct.mutations = new ArrayList(_list0.size); + for (int _i1 = 0; _i1 < _list0.size; ++_i1) + { + TColumnValue _elem2; + _elem2 = new TColumnValue(); + _elem2.read(iprot); + struct.mutations.add(_elem2); + } + iprot.readListEnd(); + } + struct.setMutationsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + + // check for required fields of primitive type, which can't be checked in the validate method + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TWalLEdit struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.mutations != null) { + oprot.writeFieldBegin(MUTATIONS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mutations.size())); + for (TColumnValue _iter3 : struct.mutations) + { + _iter3.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TWalLEditTupleSchemeFactory implements SchemeFactory { + public TWalLEditTupleScheme getScheme() { + return new TWalLEditTupleScheme(); + } + } + + private static class TWalLEditTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TWalLEdit struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.mutations.size()); + for (TColumnValue _iter4 : struct.mutations) + { + _iter4.write(oprot); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TWalLEdit struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list5 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.mutations = new ArrayList<TColumnValue>(_list5.size); + for (int _i6 = 0; _i6 < _list5.size; ++_i6) + { + TColumnValue _elem7; + _elem7 = new TColumnValue(); + _elem7.read(iprot); + struct.mutations.add(_elem7); + } + } + struct.setMutationsIsSet(true); + } + } + +} + diff --git a/hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift/hbase.thrift b/hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift/hbase.thrift new file mode 100644 index 0000000..2c18286 --- /dev/null +++ b/hbase-server/src/main/resources/org/apache/hadoop/hbase/thrift/hbase.thrift @@ -0,0 +1,97 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// NOTE: The "required" and "optional" keywords for the service methods are purely for documentation + +namespace java org.apache.hadoop.hbase.replication.thrift.generated + +struct TClusterId { + 1: i64 lb, + 2: i64 ub +} + +/** + * Represents a single cell and its value. + */ +struct TColumnValue { + 1: required binary row, + 2: required binary family, + 3: required binary qualifier, + 4: required binary value, + 5: optional i64 timestamp, + 6: required byte type +} + +/** + * Mapping for HLogKey + * +*/ +struct THLogKey { + 1: required binary tableName, + 2: required i64 writeTime, + 3: required i64 seqNum +} + +/** + * Mapping for WALEdit + * +*/ +struct TWalLEdit { + 1: required list<TColumnValue> mutations, +} + +struct TEdit { + 1: required THLogKey hLogKey, + 2: required TWalLEdit walEdit, + 3: required list<TClusterId> clusterIds +} + +struct TBatchEdit { + 1: required list<TEdit> edits +} + +// +// Exceptions +// + +/** + * A TIOError exception signals that an error occurred communicating + * to the HBase master or a HBase region server. Also used to return + * more general HBase error conditions. + */ +exception TIOError { + 1: optional string message +} + +/** + * A TIllegalArgument exception indicates an illegal or invalid + * argument was passed into a procedure.
+ */ +exception TIllegalArgument { + 1: optional string message +} + +service THBaseService { + + void replicate( + 1: required TBatchEdit edits ) throws (1: TIOError io) + + void ping() + + string getClusterUUID() +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java index 682c276..43b7323 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java @@ -25,6 +25,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.replication.ReplicationPeer; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.BeforeClass; import org.junit.Test; @@ -125,6 +126,14 @@ public class TestReplicationAdmin { } @Test + public void testSettingThePeerProtocol() throws Exception { + admin.addPeer(ID_ONE, KEY_ONE, " ", ReplicationPeer.PeerProtocol.THRIFT.name()); + assertEquals(1, admin.getPeersCount()); + assertTrue(admin.getPeerState(ID_ONE)); + assertEquals(ReplicationPeer.PeerProtocol.THRIFT, admin.getPeerConfig(ID_ONE).getProtocol()); + } + + @Test public void testGetTableCfsStr() { // opposite of TestPerTableCFReplication#testParseTableCFsFromConfig() diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java index f7374b2..23582a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSink.java @@ -98,7 +98,11 @@ public class TestReplicationSink { HConstants.REPLICATION_ENABLE_DEFAULT); TEST_UTIL.startMiniCluster(3); SINK = - new ReplicationSink(new Configuration(TEST_UTIL.getConfiguration()), STOPPABLE); + new ReplicationSink( + new Configuration(TEST_UTIL.getConfiguration()), + STOPPABLE, + java.util.UUID.randomUUID().toString() + ); table1 = TEST_UTIL.createTable(TABLE_NAME1, FAM_NAME1); table2 = TEST_UTIL.createTable(TABLE_NAME2, FAM_NAME2); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/ReplicationTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/ReplicationTestUtils.java new file mode 100644 index 0000000..4bbcdbf --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/ReplicationTestUtils.java @@ -0,0 +1,201 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.replication.thrift; + +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.replication.ReplicationException; +import org.apache.hadoop.hbase.replication.ReplicationPeer; +import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationSink; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.IOException; +import java.util.List; +import java.util.Set; +import java.util.UUID; + +import static com.google.common.collect.Iterables.getOnlyElement; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.fail; + +public class ReplicationTestUtils { + + + private static final Log LOG = LogFactory.getLog(ReplicationTestUtils.class); + public static final byte[] DEFAULT_FAMILY = Bytes.toBytes("test_family"); + public static final byte[] DEFAULT_QUALIFIER = Bytes.toBytes("test_qual"); + static final long SLEEP_TIME = 100; + static final int NB_RETRIES = 200; + + public static Configuration setupConfiguration(HBaseTestingUtility cluster, int thriftServerPort) { + Configuration configuration = cluster.getConfiguration(); + configuration.setInt("hbase.replication.thrift.server.port", thriftServerPort); + configuration.setBoolean(ReplicationSink.CONF_KEY_REPLICATION_THRIFT, true); + configuration.setBoolean("hbase.replication", true); + return configuration; + } + + public static void addPeerThriftPort(HBaseTestingUtility cluster, String peerId, int port) { + cluster.getConfiguration().setInt("hbase.replication.thrift.peer." 
+ peerId + ".port", port); + } + + public static HTableDescriptor createTestTable() throws Exception { + HTableDescriptor table = new HTableDescriptor("test_table"); + HColumnDescriptor fam = new HColumnDescriptor(DEFAULT_FAMILY); + fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); + fam.setMaxVersions(1); + table.addFamily(fam); + return table; + } + + public static void createTableOnCluster(HBaseTestingUtility cluster, HTableDescriptor table) + throws IOException { + new HBaseAdmin(cluster.getConfiguration()).createTable(table); + } + + public static HTable getTestTable(HBaseTestingUtility cluster, HTableDescriptor table) + throws IOException { + HTable result = new HTable(cluster.getConfiguration(), table.getName()); + result.setWriteBufferSize(1024); + return result; + } + + public static void addReplicationPeer(String peerId, HBaseTestingUtility source, + HBaseTestingUtility destination) throws IOException, ReplicationException { + addReplicationPeer(peerId, source, destination, ReplicationPeer.PeerProtocol.THRIFT); + } + + public static void addReplicationPeer(String peerId, HBaseTestingUtility source, + HBaseTestingUtility destination, ReplicationPeer.PeerProtocol protocol) + throws IOException, ReplicationException { + Configuration configuration = source.getConfiguration(); + + ReplicationAdmin admin = new ReplicationAdmin(configuration); + String endpoint = HBaseInterClusterReplicationEndpoint.class.getName(); + if (protocol.getProtocol() == ZooKeeperProtos.ReplicationPeer.Protocol.THRIFT) { + endpoint = ThriftHBaseReplicationEndpoint.class.getName(); + } + ReplicationPeerConfig config = + new ReplicationPeerConfig().setClusterKey(destination.getClusterKey()) + .setProtocol(protocol).setReplicationEndpointImpl(endpoint); + admin.addPeer(peerId,config, null); + } + + public static Result putAndWait(Put put, String value, HTable source, HTable target) throws Exception { + return putAndWait(put, value, false, source, target); + } + + public static Result putAndWait(Put put, String value, boolean compareTimestamps, HTable source, HTable target) throws Exception { + source.put(put); + + Get get = new Get(put.getRow()); + for (int i = 0; i < NB_RETRIES; i++) { + Result res = target.get(get); + if (res.size() == 0) { + LOG.info("Row not available"); + Thread.sleep(SLEEP_TIME); + } else { + if (compareTimestamps) { + if (getOnlyElement(res.getColumn(DEFAULT_FAMILY, DEFAULT_QUALIFIER)).getTimestamp() + != getOnlyElement(put.get(DEFAULT_FAMILY, DEFAULT_QUALIFIER)).getTimestamp()) { + LOG.info("Cell timestamps don't match... 
wait some more"); + Thread.sleep(SLEEP_TIME); + continue; + } + assertArrayEquals(res.value(), Bytes.toBytes(value)); + } + return res; + } + } + throw new RuntimeException("Waited too much time for put replication"); + } + + + public static void deleteAndWait(byte[] row, HTable source, HTable target) + throws Exception { + Delete del = new Delete(row); + source.delete(del); + + Get get = new Get(row); + for (int i = 0; i < NB_RETRIES; i++) { + if (i==NB_RETRIES-1) { + fail("Waited too much time for del replication"); + } + Result res = target.get(get); + if (res.size() >= 1) { + LOG.info("Row not deleted"); + Thread.sleep(SLEEP_TIME); + } else { + break; + } + } + } + + public static void assertContainsOnly(HTable table, Set values) throws Exception { + for (int i = 0; i < NB_RETRIES; i++) { + Set valuesCopy = Sets.newHashSet(values); + ResultScanner scanner = table.getScanner(new Scan()); + Result result; + int tableSize = 0; + int valuesSize = valuesCopy.size(); + List inTableNotExpected = Lists.newArrayList(); + while ((result = scanner.next()) != null) { + String value = Bytes.toString(result.getValue(DEFAULT_FAMILY, DEFAULT_QUALIFIER)); + boolean removed = valuesCopy.remove(value); + if (!removed) { + inTableNotExpected.add(value); + } + tableSize++; + } + if (!valuesCopy.isEmpty()) { + LOG.warn("Table did not have expected values: " + valuesCopy); + Thread.sleep(SLEEP_TIME); + } else if (tableSize != valuesSize) { + LOG.warn("Table had more values (" + tableSize + ") than expected: " + inTableNotExpected); + Thread.sleep(SLEEP_TIME); + } else { + return; + } + } + fail("Waited too much time for replication to sync up"); + } + + public static Put generateRandomPut(String value) { + return generateRandomPut(value, System.currentTimeMillis()); + } + + public static Put generateRandomPut(String value, long timestamp) { + Put put = new Put(Bytes.toBytes(UUID.randomUUID().toString())); + put.add(DEFAULT_FAMILY, DEFAULT_QUALIFIER, timestamp, Bytes.toBytes(value)); + return put; + } + + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftCyclicReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftCyclicReplication.java new file mode 100644 index 0000000..8ab8022 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftCyclicReplication.java @@ -0,0 +1,126 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication.thrift; + +import com.google.common.collect.Sets; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.util.Set; + +import static org.apache.hadoop.hbase.replication.thrift.ReplicationTestUtils.*; +import static org.apache.hadoop.hbase.replication.thrift.ReplicationTestUtils.assertContainsOnly; +import static org.apache.hadoop.hbase.replication.thrift.ReplicationTestUtils.deleteAndWait; + +@Category(MediumTests.class) +public class TestThriftCyclicReplication extends TestThriftReplicationBase { + + private final static HBaseTestingUtility clusterA = new HBaseTestingUtility(); + private final static HBaseTestingUtility clusterB = new HBaseTestingUtility(); + private final static HBaseTestingUtility clusterC = new HBaseTestingUtility(); + + private static HTable tableA; + private static HTable tableB; + private static HTable tableC; + + @BeforeClass + public static void setUpClazz() throws Exception { + + int clusterAServerPort = HBaseTestingUtility.randomFreePort(); + int clusterBServerPort = HBaseTestingUtility.randomFreePort(); + int clusterCServerPort = HBaseTestingUtility.randomFreePort(); + + setupConfiguration(clusterA, clusterAServerPort); + setupConfiguration(clusterB, clusterBServerPort); + setupConfiguration(clusterC, clusterCServerPort); + + addPeerThriftPort(clusterA, "1", clusterBServerPort); + addPeerThriftPort(clusterB, "1", clusterCServerPort); + addPeerThriftPort(clusterC, "1", clusterAServerPort); + + HTableDescriptor table = createTestTable(); + + clusterA.startMiniCluster(); + clusterB.startMiniCluster(); + clusterC.startMiniCluster(); + + createTableOnCluster(clusterA, table); + createTableOnCluster(clusterB, table); + createTableOnCluster(clusterC, table); + + tableA = getTestTable(clusterA, table); + tableB = getTestTable(clusterB, table); + tableC = getTestTable(clusterC, table); + + addReplicationPeer("1", clusterA, clusterB); + addReplicationPeer("1", clusterB, clusterC); + addReplicationPeer("1", clusterC, clusterA); + } + + @Before + public void setUp() throws Exception { + clusterA.truncateTable(tableA.getTableName()); + clusterB.truncateTable(tableB.getTableName()); + + } + + @AfterClass + public static void tearDown() throws Exception { + clusterA.shutdownMiniCluster(); + clusterB.shutdownMiniCluster(); + clusterC.shutdownMiniCluster(); + } + + @Test + public void testCyclicReplication() throws Exception { + String firstRow = "firstRow"; + putAndWait(generateRandomPut(firstRow), firstRow, tableA, tableB); + + String secondRow = "secondRow"; + putAndWait(generateRandomPut(secondRow), secondRow, tableB, tableC); + + String thirdRow = "thirdRow"; + Put lastPut = generateRandomPut(thirdRow); + putAndWait(lastPut, thirdRow, tableC, tableA); + + Set expected = Sets.newHashSet(firstRow, secondRow, thirdRow); + assertContainsOnly(tableA, expected); + assertContainsOnly(tableB, expected); + assertContainsOnly(tableC, expected); + + // lets delete one of those rows and verify it goes around + + Delete delete = new Delete(lastPut.getRow()); + deleteAndWait(delete.getRow(), tableB, tableC); + + 
expected = Sets.newHashSet(firstRow, secondRow); + assertContainsOnly(tableB, expected); + assertContainsOnly(tableC, expected); + assertContainsOnly(tableA, expected); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftMasterMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftMasterMasterReplication.java new file mode 100644 index 0000000..6c6b011 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftMasterMasterReplication.java @@ -0,0 +1,104 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.replication.thrift; +import com.google.common.collect.Iterables; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.*; +import org.junit.experimental.categories.Category; + +import static org.apache.hadoop.hbase.replication.thrift.ReplicationTestUtils.*; +import static org.junit.Assert.assertEquals; + + + +@Category(MediumTests.class) +public class TestThriftMasterMasterReplication extends TestThriftReplicationBase { + + private final static HBaseTestingUtility clusterA = new HBaseTestingUtility(); + private final static HBaseTestingUtility clusterB = new HBaseTestingUtility(); + + private static HTable tableA; + private static HTable tableB; + + @BeforeClass + public static void setUpClazz() throws Exception { + + int clusterAServerPort = HBaseTestingUtility.randomFreePort(); + int clusterBServerPort = HBaseTestingUtility.randomFreePort(); + + setupConfiguration(clusterA, clusterAServerPort); + setupConfiguration(clusterB, clusterBServerPort); + + addPeerThriftPort(clusterA, "1", clusterBServerPort); + addPeerThriftPort(clusterB, "1", clusterAServerPort); + + HTableDescriptor table = createTestTable(); + + clusterA.startMiniCluster(); + clusterB.startMiniCluster(); + + createTableOnCluster(clusterA, table); + createTableOnCluster(clusterB, table); + + tableA = getTestTable(clusterA, table); + tableB = getTestTable(clusterB, table); + + addReplicationPeer("1", clusterA, clusterB); + addReplicationPeer("1", clusterB, clusterA); + } + + @Before + public void setUp() throws Exception { + clusterA.truncateTable(tableA.getTableName()); + clusterB.truncateTable(tableB.getTableName()); + + } + + @AfterClass + public static void tearDown() throws Exception { + clusterA.shutdownMiniCluster(); + 
+    clusterB.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testMasterMasterReplication() throws Exception {
+    long originalTimestamp = 1L;
+    String rowKey = "master-master-key";
+    String value = "testMasterMaster";
+    Put originalPut = new Put(Bytes.toBytes(rowKey));
+    originalPut.add(DEFAULT_FAMILY, DEFAULT_QUALIFIER, originalTimestamp, Bytes.toBytes(value));
+    Result originalResult = putAndWait(originalPut, value, true, tableA, tableB);
+    KeyValue originalKeyVal =
+        Iterables.getOnlyElement(originalResult.getColumn(DEFAULT_FAMILY, DEFAULT_QUALIFIER));
+    assertEquals(originalTimestamp, originalKeyVal.getTimestamp());
+
+    long newTimestamp = 2L;
+    Put overwritePut = new Put(Bytes.toBytes(rowKey));
+    overwritePut.add(DEFAULT_FAMILY, DEFAULT_QUALIFIER, newTimestamp, Bytes.toBytes(value));
+    putAndWait(overwritePut, value, true, tableB, tableA);
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftMultiSlaveReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftMultiSlaveReplication.java
new file mode 100644
index 0000000..1ebae5b
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftMultiSlaveReplication.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.thrift;
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.Set;
+
+import static org.apache.hadoop.hbase.replication.thrift.ReplicationTestUtils.*;
+
+@Category(MediumTests.class)
+public class TestThriftMultiSlaveReplication extends TestThriftReplicationBase {
+
+  private final static HBaseTestingUtility clusterA = new HBaseTestingUtility();
+  private final static HBaseTestingUtility clusterB = new HBaseTestingUtility();
+  private final static HBaseTestingUtility clusterC = new HBaseTestingUtility();
+
+  private static HTable tableA;
+  private static HTable tableB;
+  private static HTable tableC;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+
+    int clusterAServerPort = HBaseTestingUtility.randomFreePort();
+    int clusterBServerPort = HBaseTestingUtility.randomFreePort();
+    int clusterCServerPort = HBaseTestingUtility.randomFreePort();
+
+    setupConfiguration(clusterA, clusterAServerPort);
+    setupConfiguration(clusterB, clusterBServerPort);
+    setupConfiguration(clusterC, clusterCServerPort);
+
+    addPeerThriftPort(clusterA, "1", clusterBServerPort);
+    addPeerThriftPort(clusterA, "2", clusterCServerPort);
+
+    HTableDescriptor table = createTestTable();
+
+    clusterA.startMiniCluster();
+    clusterB.startMiniCluster();
+    clusterC.startMiniCluster();
+
+    createTableOnCluster(clusterA, table);
+    createTableOnCluster(clusterB, table);
+    createTableOnCluster(clusterC, table);
+
+    tableA = getTestTable(clusterA, table);
+    tableB = getTestTable(clusterB, table);
+    tableC = getTestTable(clusterC, table);
+
+    addReplicationPeer("1", clusterA, clusterB);
+    addReplicationPeer("2", clusterA, clusterC);
+  }
+
+  @Test
+  public void testReplicatingToMultipleSlaves() throws Exception {
+    String firstRow = "firstRow";
+    putAndWait(generateRandomPut(firstRow), firstRow, tableA, tableB);
+
+    // make sure all tables have that one element only
+    Set<String> expected = Sets.newHashSet(firstRow);
+    assertContainsOnly(tableA, expected);
+    assertContainsOnly(tableB, expected);
+    assertContainsOnly(tableC, expected);
+
+    String secondRow = "secondRow";
+    Put lastPut = generateRandomPut(secondRow);
+    putAndWait(lastPut, secondRow, tableA, tableB);
+
+    // all tables should have both elements
+    Set<String> otherExpected = Sets.newHashSet(firstRow, secondRow);
+    assertContainsOnly(tableA, otherExpected);
+    assertContainsOnly(tableB, otherExpected);
+    assertContainsOnly(tableC, otherExpected);
+
+    // let's delete one of these rows and verify the delete is replicated to both slaves
+    Delete delete = new Delete(lastPut.getRow());
+    deleteAndWait(delete.getRow(), tableA, tableB);
+
+    assertContainsOnly(tableA, expected);
+    assertContainsOnly(tableB, expected);
+    assertContainsOnly(tableC, expected);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    clusterA.shutdownMiniCluster();
+    clusterB.shutdownMiniCluster();
+    clusterC.shutdownMiniCluster();
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftReplicationBase.java
new file mode 100644
index 0000000..82d7643
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftReplicationBase.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.thrift;
+
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestThriftReplicationBase {
+
+  @BeforeClass
+  public static void setUpKlazz() throws Exception {
+    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
+    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.util.MBeans.class)
+        .setLevel(org.apache.log4j.Level.ERROR);
+    org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class)
+        .setLevel(org.apache.log4j.Level.ERROR);
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftReplicationSink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftReplicationSink.java
new file mode 100644
index 0000000..319b374
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/TestThriftReplicationSink.java
@@ -0,0 +1,265 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.thrift;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
+import org.apache.hadoop.hbase.replication.regionserver.ReplicationSink;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.ByteStringer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+@Category(MediumTests.class)
+public class TestThriftReplicationSink extends TestThriftReplicationBase {
+
+  private static final Log LOG = LogFactory.getLog(TestThriftReplicationSink.class);
+  private static final int BATCH_SIZE = 10;
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private static ReplicationSink SINK;
+
+  private static final byte[] TABLE_NAME1 = Bytes.toBytes("table1");
+  private static final byte[] TABLE_NAME2 = Bytes.toBytes("table2");
+
+  private static final byte[] FAM_NAME1 = Bytes.toBytes("info1");
+  private static final byte[] FAM_NAME2 = Bytes.toBytes("info2");
+
+  private static HTable table1;
+  private static HTable table2;
+
+  private static Stoppable STOPPABLE = new Stoppable() {
+    final AtomicBoolean stop = new AtomicBoolean(false);
+
+    @Override
+    public boolean isStopped() {
+      return this.stop.get();
+    }
+
+    @Override
+    public void stop(String why) {
+      LOG.info("STOPPING BECAUSE: " + why);
+      this.stop.set(true);
+    }
+  };
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
+    ReplicationTestUtils.setupConfiguration(TEST_UTIL, HBaseTestingUtility.randomFreePort());
+    TEST_UTIL.startMiniCluster(1);
+
+    Configuration sinkConfiguration = new Configuration(TEST_UTIL.getConfiguration());
+    sinkConfiguration.setInt("hbase.replication.thrift.server.port",
+        HBaseTestingUtility.randomFreePort());
+    SINK = new ReplicationSink(sinkConfiguration, STOPPABLE, UUID.randomUUID().toString());
+    table1 = TEST_UTIL.createTable(TABLE_NAME1, FAM_NAME1);
+    table2 = TEST_UTIL.createTable(TABLE_NAME2, FAM_NAME2);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    STOPPABLE.stop("Shutting down");
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @Before
+  public void setUp() throws Exception {
+    table1 = TEST_UTIL.truncateTable(TABLE_NAME1);
+    table2 = TEST_UTIL.truncateTable(TABLE_NAME2);
+  }
+
+  /**
+   * Insert a whole batch of entries
+   * @throws Exception
+   */
+  @Test
+  public void testBatchSink() throws Exception {
+    List<AdminProtos.WALEntry> entries = new ArrayList<AdminProtos.WALEntry>(BATCH_SIZE);
+    List<Cell> cells = new ArrayList<Cell>();
+    for(int i = 0; i < BATCH_SIZE; i++) {
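+      // createEntry fills in the WALEntry metadata and appends the backing
+      // KeyValue to 'cells'; the sink re-associates them via the CellScanner.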
+      entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
+    }
+    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()));
+    Scan scan = new Scan();
+    ResultScanner scanRes = table1.getScanner(scan);
+    assertEquals(BATCH_SIZE, scanRes.next(BATCH_SIZE).length);
+  }
+
+  /**
+   * Insert a mix of puts and deletes
+   * @throws Exception
+   */
+  @Test
+  public void testMixedPutDelete() throws Exception {
+    List<AdminProtos.WALEntry> entries = new ArrayList<AdminProtos.WALEntry>(BATCH_SIZE/2);
+    List<Cell> cells = new ArrayList<Cell>();
+    for(int i = 0; i < BATCH_SIZE/2; i++) {
+      entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
+    }
+    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells));
+
+    entries = new ArrayList<AdminProtos.WALEntry>(BATCH_SIZE);
+    cells = new ArrayList<Cell>();
+    for(int i = 0; i < BATCH_SIZE; i++) {
+      entries.add(createEntry(TABLE_NAME1, i,
+          i % 2 != 0 ? KeyValue.Type.Put : KeyValue.Type.DeleteColumn, cells));
+    }
+
+    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()));
+    Scan scan = new Scan();
+    ResultScanner scanRes = table1.getScanner(scan);
+    assertEquals(BATCH_SIZE/2, scanRes.next(BATCH_SIZE).length);
+  }
+
+  /**
+   * Insert into two different tables
+   * @throws Exception
+   */
+  @Test
+  public void testMixedPutTables() throws Exception {
+    List<AdminProtos.WALEntry> entries = new ArrayList<AdminProtos.WALEntry>(BATCH_SIZE/2);
+    List<Cell> cells = new ArrayList<Cell>();
+    for(int i = 0; i < BATCH_SIZE; i++) {
+      entries.add(createEntry(i % 2 == 0 ? TABLE_NAME2 : TABLE_NAME1,
+          i, KeyValue.Type.Put, cells));
+    }
+
+    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()));
+    Scan scan = new Scan();
+    ResultScanner scanRes = table2.getScanner(scan);
+    for(Result res : scanRes) {
+      assertTrue(Bytes.toInt(res.getRow()) % 2 == 0);
+    }
+  }
+
+  /**
+   * Insert then do different types of deletes
+   * @throws Exception
+   */
+  @Test
+  public void testMixedDeletes() throws Exception {
+    List<AdminProtos.WALEntry> entries = new ArrayList<AdminProtos.WALEntry>(3);
+    List<Cell> cells = new ArrayList<Cell>();
+    for(int i = 0; i < 3; i++) {
+      entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
+    }
+    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()));
+    entries = new ArrayList<AdminProtos.WALEntry>(3);
+    cells = new ArrayList<Cell>();
+    entries.add(createEntry(TABLE_NAME1, 0, KeyValue.Type.DeleteColumn, cells));
+    entries.add(createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily, cells));
+    entries.add(createEntry(TABLE_NAME1, 2, KeyValue.Type.DeleteColumn, cells));
+
+    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()));
+
+    Scan scan = new Scan();
+    ResultScanner scanRes = table1.getScanner(scan);
+    assertEquals(0, scanRes.next(3).length);
+  }
+
+  /**
+   * Puts are buffered, so this tests the case where a delete (not buffered)
+   * is applied before the Put that created the cell it removes.
+   * @throws Exception
+   */
+  @Test
+  public void testApplyDeleteBeforePut() throws Exception {
+    List<AdminProtos.WALEntry> entries = new ArrayList<AdminProtos.WALEntry>(5);
+    List<Cell> cells = new ArrayList<Cell>();
+    for(int i = 0; i < 2; i++) {
+      entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
+    }
+    entries.add(createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily, cells));
+    for(int i = 3; i < 5; i++) {
+      entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
+    }
+    SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()));
+    Get get = new Get(Bytes.toBytes(1));
+    Result res = table1.get(get);
+    assertEquals(0, res.size());
+  }
+
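+  /**
+   * Builds a WALEntry of the given type for the given row. The matching
+   * KeyValue is appended to {@code cells}, since entries and their cells
+   * travel separately and are re-joined by the CellScanner.
+   */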
+  private AdminProtos.WALEntry createEntry(byte[] table, int row, KeyValue.Type type,
+      List<Cell> cells) {
+    byte[] fam = Bytes.equals(table, TABLE_NAME1) ? FAM_NAME1 : FAM_NAME2;
+    byte[] rowBytes = Bytes.toBytes(row);
+    // Just make sure we don't get the same ts for two consecutive rows with
+    // same key
+    try {
+      Thread.sleep(1);
+    } catch (InterruptedException e) {
+      LOG.info("Was interrupted while sleeping, meh", e);
+    }
+    final long now = System.currentTimeMillis();
+    KeyValue kv = null;
+    if(type.getCode() == KeyValue.Type.Put.getCode()) {
+      kv = new KeyValue(rowBytes, fam, fam, now,
+          KeyValue.Type.Put, Bytes.toBytes(row));
+    } else if (type.getCode() == KeyValue.Type.DeleteColumn.getCode()) {
+      kv = new KeyValue(rowBytes, fam, fam,
+          now, KeyValue.Type.DeleteColumn);
+    } else if (type.getCode() == KeyValue.Type.DeleteFamily.getCode()) {
+      kv = new KeyValue(rowBytes, fam, null,
+          now, KeyValue.Type.DeleteFamily);
+    }
+    AdminProtos.WALEntry.Builder builder = AdminProtos.WALEntry.newBuilder();
+    builder.setAssociatedCellCount(1);
+    WALProtos.WALKey.Builder keyBuilder = WALProtos.WALKey.newBuilder();
+    HBaseProtos.UUID.Builder uuidBuilder = HBaseProtos.UUID.newBuilder();
+    uuidBuilder.setLeastSigBits(HConstants.DEFAULT_CLUSTER_ID.getLeastSignificantBits());
+    uuidBuilder.setMostSigBits(HConstants.DEFAULT_CLUSTER_ID.getMostSignificantBits());
+    keyBuilder.setClusterId(uuidBuilder.build());
+    keyBuilder.setTableName(ByteStringer.wrap(table));
+    keyBuilder.setWriteTime(now);
+    keyBuilder.setEncodedRegionName(ByteStringer.wrap(HConstants.EMPTY_BYTE_ARRAY));
+    keyBuilder.setLogSequenceNumber(-1);
+    builder.setKey(keyBuilder.build());
+    cells.add(kv);
+
+    return builder.build();
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/ThriftAdaptorsTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/ThriftAdaptorsTest.java
new file mode 100644
index 0000000..f865f12
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/thrift/ThriftAdaptorsTest.java
@@ -0,0 +1,98 @@
+package org.apache.hadoop.hbase.replication.thrift;
+
+import com.google.common.collect.Lists;
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.replication.thrift.generated.TBatchEdit;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.hamcrest.CoreMatchers;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+
+import static org.apache.hadoop.hbase.replication.thrift.ThriftAdaptors.REPLICATION_BATCH_ADAPTOR;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+@Category(SmallTests.class)
+public class ThriftAdaptorsTest {
+
+  @Test
+  public void testSerializingHLogEntryAndBack() throws Exception {
+    long now = System.currentTimeMillis();
+    UUID uuid = UUID.randomUUID();
+
+    List<KeyValue> keyValues = makeKeyValues(1, 10, "CF", "CQ", KeyValue.Type.Put, now);
+
+    List<UUID> clusterIds = Arrays.asList(uuid);
+    HLogKey key = createKey("region", "table", now, clusterIds);
+    WALEdit walEdit = createWALEdit(Lists.<UUID>newArrayList(), keyValues);
+    HLog.Entry entry = new HLog.Entry(key, walEdit);
+
+    TBatchEdit result = REPLICATION_BATCH_ADAPTOR.toThrift(Arrays.asList(entry));
+
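+    // A single HLog.Entry went in, so exactly one edit should come back out.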
+    assertEquals(1, result.getEditsSize());
+
+    // change this back to key values and compare each one to ensure they match
+    List<HLog.Entry> entries = REPLICATION_BATCH_ADAPTOR.fromThrift(result);
+    HLog.Entry onlyEntry = entries.get(0);
+    assertTrue(customHLogKeyEquality(onlyEntry.getKey(), key).isEquals());
+    List<KeyValue> transformedKeyValues = onlyEntry.getEdit().getKeyValues();
+    assertThat(transformedKeyValues, CoreMatchers.hasItems(
+        keyValues.toArray(new KeyValue[keyValues.size()])));
+  }
+
+  private HLogKey createKey(String region, String table, long timestamp, List<UUID> clusterIds) {
+    return new HLogKey(
+        Bytes.toBytes(region),
+        TableName.valueOf(table),
+        -1,
+        timestamp,
+        clusterIds,
+        HConstants.NO_NONCE,
+        HConstants.NO_NONCE
+    );
+  }
+
+  private WALEdit createWALEdit(List<UUID> clusterIds, List<KeyValue> keyValues) {
+    WALEdit edit = new WALEdit();
+    for (KeyValue keyValue : keyValues) {
+      edit.add(keyValue);
+    }
+    return edit;
+  }
+
+  private List<KeyValue> makeKeyValues(int from, int to, String family, String cq,
+      KeyValue.Type type, long ts) {
+    List<KeyValue> result = Lists.newArrayList();
+    while (from < to) {
+      byte[] rowkey = Bytes.toBytes("row-" + from);
+      result.add(
+          new KeyValue(rowkey, Bytes.toBytes(family), Bytes.toBytes(cq), ts, type, rowkey));
+      from++;
+    }
+    return result;
+  }
+
+  // we need a custom equality check because all replication cares about here
+  // is the clusterIds and the table name
+  private EqualsBuilder customHLogKeyEquality(HLogKey left, HLogKey right) {
+    return new EqualsBuilder()
+        .append(left.getClusterIds(), right.getClusterIds())
+        .append(left.getTablename(), right.getTablename());
+  }
+}
\ No newline at end of file
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index cc9e41f..bc66d0a 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -32,8 +32,8 @@ module Hbase
 
   #----------------------------------------------------------------------------------------------
   # Add a new peer cluster to replicate to
-  def add_peer(id, cluster_key, peer_tableCFs = nil)
-    @replication_admin.addPeer(id, cluster_key, peer_tableCFs)
+  def add_peer(id, cluster_key, peer_tableCFs = nil, protocol = nil)
+    @replication_admin.addPeer(id, cluster_key, peer_tableCFs, protocol)
   end
 
   #----------------------------------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
index ecd8e75..1fc594c 100644
--- a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
@@ -31,12 +31,14 @@ Examples:
   hbase> add_peer '1', "server1.cie.com:2181:/hbase"
   hbase> add_peer '2', "zk1,zk2,zk3:2182:/hbase-prod"
   hbase> add_peer '3', "zk4,zk5,zk6:11000:/hbase-test", "tab1; tab2:cf1; tab3:cf2,cf3"
+  hbase> add_peer '4', "zk4,zk5,zk6:11000:/hbase-test", " ", "THRIFT"
+  hbase> add_peer '5', "zk4,zk5,zk6:11000:/hbase-test", "tab1; tab2:cf1; tab3:cf2,cf3", "THRIFT"
 EOF
       end
 
-      def command(id, cluster_key, peer_tableCFs = nil)
+      def command(id, cluster_key, peer_tableCFs = nil, protocol = nil)
         format_simple_command do
-          replication_admin.add_peer(id, cluster_key, peer_tableCFs)
+          replication_admin.add_peer(id, cluster_key, peer_tableCFs, protocol)
         end
       end
     end
diff --git a/pom.xml b/pom.xml
index b99cc78..e2077de 100644
--- a/pom.xml
+++ b/pom.xml
@@ -969,7 +969,7 @@
     3.0.3
     ${compileSource}
-    2.2.0
+    2.5.2
     1.2.1
     1.2
     1.7
@@ -995,9 +995,9 @@
     1.9.0
     2.5.0
     thrift
-    0.9.0
+    0.9.1
     3.4.6
-    1.6.4
+    1.7.5
     0.0.1-SNAPSHOT
     2.6.3
     2.3.1
-- 
2.1.0