From ba46419acb2d044d47d7b61a2d676d0577d0ceac Mon Sep 17 00:00:00 2001
From: Gaurav Menghani
Date: Mon, 1 Dec 2014 13:34:22 +0530
Subject: [PATCH] [HBASE-12510] Make hbase-consensus independent of
 HRegionInfo (and other cruft removal)

Summary:
We need to remove HRegionInfo from hbase-consensus (and in general all
HBase dependencies, but they aren't blocking us right now). This is
because:

(1) We cannot use our internal HRegionInfo while trying to have a quorum
    on the WAL.
(2) The open-source HRegionInfo is different from ours (no QuorumInfo,
    no HTableDescriptor, etc.).
(3) We would depend on hbase-client, while hbase-client depends on us
    (hbase-consensus). This is a cyclic dependency and will fail to
    compile.
(4) Regardless of (1)-(3), using HRegionInfo in hbase-consensus doesn't
    make sense, because the quorum should be independent of the
    underlying KV store.

Apart from this, I moved the RMap-related parts to hbase-server; a couple
of TODOs there still need to be finished. Also started using ServerName
instead of HServerAddress wherever possible.

Test Plan: Unit tests

Reviewers: shroffrishit, aaiyer, fantasist, adela

Subscribers: eclark, zelaine.fong

Differential Revision: https://reviews.facebook.net/D29685
---
 hbase-client/pom.xml                               |   7 +
 .../java/org/apache/hadoop/hbase/HRegionInfo.java  |  15 +
 .../java/org/apache/hadoop/hbase/HConstants.java   |   7 +-
 hbase-consensus/pom.xml                            |   5 -
 .../java/org/apache/hadoop/hbase/HRegionInfo.java  | 838 ---------------------
 .../java/org/apache/hadoop/hbase/KeyValue.java     |  15 +-
 .../hbase/consensus/client/QuorumClient.java       |   2 +-
 .../hadoop/hbase/consensus/quorum/QuorumInfo.java  |   3 +
 .../consensus/rmap/GetHydraBaseRegionInfoUtil.java |  67 --
 .../hadoop/hbase/consensus/rmap/HDFSReader.java    | 140 ----
 .../hadoop/hbase/consensus/rmap/LocalReader.java   |  96 ---
 .../hbase/consensus/rmap/NoSuchRMapException.java  |  10 -
 .../apache/hadoop/hbase/consensus/rmap/Parser.java | 146 ----
 .../hbase/consensus/rmap/RMapConfiguration.java    | 330 --------
 .../hadoop/hbase/consensus/rmap/RMapException.java |  11 -
 .../hadoop/hbase/consensus/rmap/RMapJSON.java      |  34 -
 .../hadoop/hbase/consensus/rmap/RMapReader.java    | 205 -----
 .../hadoop/hbase/consensus/rmap/RegionLocator.java | 142 ----
 .../consensus/server/LocalConsensusServer.java     |   7 +-
 .../hadoop/hbase/consensus/util/RaftUtil.java      |  16 +-
 .../hadoop/hbase/regionserver/wal/AbstractWAL.java |   4 +-
 .../hadoop/hbase/consensus/LocalTestBed.java       |  46 +-
 .../hadoop/hbase/consensus/RaftTestUtil.java       |  96 ++-
 .../consensus/ReplicationLoadForUnitTest.java      |  14 +-
 .../hadoop/hbase/consensus/TestBasicCommit.java    |  36 +-
 .../hbase/consensus/TestBasicLeaderElection.java   |  15 +-
 .../hbase/consensus/TestBasicPeerFailure.java      |  34 +-
 .../hbase/consensus/TestBasicPeerSeeding.java      |  22 +-
 .../hadoop/hbase/consensus/TestBasicPeerSlow.java  |  28 +-
 .../hbase/consensus/TestBasicQuorumCommit.java     |  26 +-
 .../consensus/TestBasicQuorumMembershipChange.java |  27 +-
 .../hbase/consensus/TestBasicSeedCommitIndex.java  |  34 +-
 .../hadoop/hbase/consensus/TestCommitDeadline.java |  20 +-
 .../consensus/TestLowerRankBecomingLeader.java     |  28 +-
 .../hbase/consensus/TestPersistLastVotedFor.java   |  20 +-
 .../hbase/consensus/TestRaftEventListener.java     |  25 +-
 .../fsm/TestAsyncStatesInRaftStateMachine.java     |  34 +-
 .../hbase/consensus/log/TestRemoteLogFetcher.java  |  20 +-
 .../hadoop/hbase/consensus/rmap/TestParser.java    |  93 ---
 .../consensus/rmap/TestRMapConfiguration.java      |  55 --
 .../hbase/consensus/rmap/TestRMapReader.java       | 102 ---
 .../hbase/consensus/rmap/TestRegionLocator.java    | 180 -----
 hbase-server/pom.xml                               |  10 +
 .../hadoop/hbase/consensus/rmap/HDFSReader.java    | 140 ++++
 .../hadoop/hbase/consensus/rmap/LocalReader.java   |  96 +++
 .../hbase/consensus/rmap/NoSuchRMapException.java  |  10 +
 .../apache/hadoop/hbase/consensus/rmap/Parser.java | 153 ++++
 .../hbase/consensus/rmap/RMapConfiguration.java    | 270 +++++++
 .../hadoop/hbase/consensus/rmap/RMapException.java |  11 +
 .../hadoop/hbase/consensus/rmap/RMapJSON.java      |  34 +
 .../hadoop/hbase/consensus/rmap/RMapReader.java    | 205 +++
 .../hadoop/hbase/consensus/rmap/TestParser.java    |  97 +++
 .../consensus/rmap/TestRMapConfiguration.java      |  55 ++
 .../hbase/consensus/rmap/TestRMapReader.java       | 102 +++
 54 files changed, 1483 insertions(+), 2755 deletions(-)
 delete mode 100644 hbase-consensus/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
 delete mode 100644 hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/GetHydraBaseRegionInfoUtil.java
 delete mode 100644 hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/HDFSReader.java
 delete mode 100644 hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/LocalReader.java
 delete mode 100644 hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/NoSuchRMapException.java
 delete mode 100644 hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java
 delete mode 100644 hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapConfiguration.java
 delete mode 100644 hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapException.java
 delete mode 100644 hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapJSON.java
 delete mode 100644 hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapReader.java
 delete mode 100644 hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RegionLocator.java
 delete mode 100644 hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestParser.java
 delete mode 100644 hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapConfiguration.java
 delete mode 100644 hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapReader.java
 delete mode 100644 hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRegionLocator.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/HDFSReader.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/LocalReader.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/NoSuchRMapException.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapConfiguration.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapException.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapJSON.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapReader.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestParser.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapConfiguration.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapReader.java

diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index 5d21ea3..0159292
100644 --- a/hbase-client/pom.xml +++ b/hbase-client/pom.xml @@ -118,6 +118,13 @@ org.apache.hbase hbase-protocol + commons-codec diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index 82beb0b..a423d86 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -20,9 +20,11 @@ package org.apache.hadoop.hbase; import java.io.DataInputStream; import java.io.IOException; +import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -118,6 +120,9 @@ public class HRegionInfo implements Comparable { private static final int MAX_REPLICA_ID = 0xFFFF; public static final int DEFAULT_REPLICA_ID = 0; + + // Peers of the Consensus Quorum + // private QuorumInfo quorumInfo; /** * Does region name contain its encoded name? * @param regionName region name @@ -286,6 +291,16 @@ public class HRegionInfo implements Comparable { this(tableName, startKey, endKey, split, regionid, DEFAULT_REPLICA_ID); } + public HRegionInfo(final TableName tableName, final byte[] startKey, + final byte[] endKey, final boolean split, final long regionid, + final Map> peers, + final Map favoredNodesMap) + throws IllegalArgumentException { + this(tableName, startKey, endKey, split, regionid); + // TODO @gauravm + // Set QuorumInfo + } + /** * Construct HRegionInfo with explicit parameters * diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 6001767..81053c9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1097,7 +1097,12 @@ public final class HConstants { 600000; public static final String HBASE_CLIENT_FAST_FAIL_INTERCEPTOR_IMPL = - "hbase.client.fast.fail.interceptor.impl"; + "hbase.client.fast.fail.interceptor.impl"; + + public static final String RMAP_SUBSCRIPTION = "hbase.rmap.subscriptions"; + + public static final String HYDRABASE_DCNAMES = "hbase.hydrabase.dcnames"; + public static final String HYDRABASE_DCNAME = "hbase.hydrabase.dcname"; private HConstants() { // Can't be instantiated with this ctor. diff --git a/hbase-consensus/pom.xml b/hbase-consensus/pom.xml index 6a048eb..ad541f6 100644 --- a/hbase-consensus/pom.xml +++ b/hbase-consensus/pom.xml @@ -224,11 +224,6 @@ ${swift.version} - org.json - json - 20090211 - - commons-httpclient commons-httpclient 3.1 diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java deleted file mode 100644 index a46e187..0000000 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ /dev/null @@ -1,838 +0,0 @@ -/** - * Copyright 2007 The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.*; -import java.util.Map.Entry; - -import com.google.common.base.Joiner; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo; -import org.apache.hadoop.hbase.thrift.generated.IllegalArgument; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.JenkinsHash; -import org.apache.hadoop.hbase.util.MD5Hash; -import org.apache.hadoop.io.VersionedWritable; -import org.apache.hadoop.io.WritableComparable; - -/** - * HRegion information. - * Contains HRegion id, start and end keys, a reference to this - * HRegions' table descriptor, etc. - */ -public class HRegionInfo extends VersionedWritable implements WritableComparable{ - private static final byte VERSION = 0; - private static final Log LOG = LogFactory.getLog(HRegionInfo.class); - protected Map favoredNodesMap = new HashMap<>(); - - /** - * The new format for a region name contains its encodedName at the end. - * The encoded name also serves as the directory name for the region - * in the filesystem. - * - * New region name format: - * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. - * where, - * <encodedName> is a hex version of the MD5 hash of - * <tablename>,<startkey>,<regionIdTimestamp> - * - * The old region name format: - * <tablename>,<startkey>,<regionIdTimestamp> - * For region names in the old format, the encoded name is a 32-bit - * JenkinsHash integer value (in its decimal notation, string form). - *

- * **NOTE** - * - * ROOT, the first META region, and regions created by an older - * version of HBase (0.20 or prior) will continue to use the - * old region name format. - */ - - /** Separator used to demarcate the encodedName in a region name - * in the new format. See description on new format above. - */ - private static final int ENC_SEPARATOR = '.'; - public static final int MD5_HEX_LENGTH = 32; - - /** - * Does region name contain its encoded name? - * @param regionName region name - * @return boolean indicating if this a new format region - * name which contains its encoded name. - */ - private static boolean hasEncodedName(final byte[] regionName) { - // check if region name ends in ENC_SEPARATOR - if ((regionName.length >= 1) - && (regionName[regionName.length - 1] == ENC_SEPARATOR)) { - // region name is new format. it contains the encoded name. - return true; - } - return false; - } - - /** - * @param regionName - * @return the encodedName - */ - public static String encodeRegionName(final byte [] regionName) { - String encodedName; - if (hasEncodedName(regionName)) { - // region is in new format: - // ,,/encodedName/ - encodedName = Bytes.toString(regionName, - regionName.length - MD5_HEX_LENGTH - 1, - MD5_HEX_LENGTH); - } else { - // old format region name. ROOT and first META region also - // use this format.EncodedName is the JenkinsHash value. - int hashVal = Math.abs(JenkinsHash.getInstance().hash(regionName, - regionName.length, - 0)); - encodedName = String.valueOf(hashVal); - } - return encodedName; - } - - /** delimiter used between portions of a region name */ - public static final int DELIMITER = ','; - - /** HRegionInfo for root region */ - public static final HRegionInfo ROOT_REGIONINFO = - new HRegionInfo(0L, HTableDescriptor.ROOT_TABLEDESC); - - /** Encoded name for the root region. This is always the same. */ - public static final String ROOT_REGION_ENCODED_NAME_STR = - HRegionInfo.ROOT_REGIONINFO.getEncodedName(); - - /** HRegionInfo for first meta region */ - public static final HRegionInfo FIRST_META_REGIONINFO = - new HRegionInfo(1L, HTableDescriptor.META_TABLEDESC); - - private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY; - private boolean offLine = false; - private long regionId = -1; - private transient byte [] regionName = HConstants.EMPTY_BYTE_ARRAY; - private String regionNameStr = ""; - private boolean split = false; - private byte [] splitPoint = null; - private byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; - protected HTableDescriptor tableDesc = null; - private int hashCode = -1; - //TODO: Move NO_HASH to HStoreFile which is really the only place it is used. 
- public static final String NO_HASH = null; - private volatile String encodedName = NO_HASH; - - // Peers of the Consensus Quorum - private QuorumInfo quorumInfo; - // For compatability with non-hydrabase mode - public static String LOCAL_DC_KEY = "LOCAL_DC_KEY_FOR_NON_HYDRABASE_MODE"; - - private void setHashCode() { - int result = Arrays.hashCode(this.regionName); - result ^= this.regionId; - result ^= Arrays.hashCode(this.startKey); - result ^= Arrays.hashCode(this.endKey); - result ^= Boolean.valueOf(this.offLine).hashCode(); - result ^= this.tableDesc.hashCode(); - this.hashCode = result; - } - - /** - * Private constructor used constructing HRegionInfo for the catalog root and - * first meta regions - */ - private HRegionInfo(long regionId, HTableDescriptor tableDesc) { - super(); - this.regionId = regionId; - this.tableDesc = tableDesc; - - // Note: Root & First Meta regions names are still in old format - this.regionName = createRegionName(tableDesc.getName(), null, - regionId, false); - this.regionNameStr = Bytes.toStringBinary(this.regionName); - setHashCode(); - } - - /** Default constructor - creates empty object */ - public HRegionInfo() { - super(); - this.tableDesc = new HTableDescriptor(); - } - - /** - * Construct HRegionInfo with explicit parameters - * - * @param tableDesc the table descriptor - * @param startKey first key in region - * @param endKey end of key range - * @throws IllegalArgumentException - */ - public HRegionInfo(final HTableDescriptor tableDesc, final byte [] startKey, - final byte [] endKey) - throws IllegalArgumentException { - this(tableDesc, startKey, endKey, false); - } - - /** - * Construct HRegionInfo with explicit parameters - * - * @param tableDesc the table descriptor - * @param startKey first key in region - * @param endKey end of key range - * @param split true if this region has split and we have daughter regions - * regions that may or may not hold references to this region. - * @throws IllegalArgumentException - */ - public HRegionInfo(HTableDescriptor tableDesc, final byte [] startKey, - final byte [] endKey, final boolean split) - throws IllegalArgumentException { - this(tableDesc, startKey, endKey, split, System.currentTimeMillis()); - } - - /** - * Construct HRegionInfo with explicit parameters - * - * @param tableDesc the table descriptor - * @param startKey first key in region - * @param endKey end of key range - * @param split true if this region has split and we have daughter regions - * regions that may or may not hold references to this region. - * @param regionid Region id to use. - * @throws IllegalArgumentException - */ - public HRegionInfo(HTableDescriptor tableDesc, final byte [] startKey, - final byte [] endKey, final boolean split, final long regionid) - throws IllegalArgumentException { - this(tableDesc, startKey, endKey, split, regionid, null, null); - } - - /** - * Construct HRegionInfo with explicit parameters - * - * @param tableDesc the table descriptor - * @param startKey first key in region - * @param endKey end of key range - * @param split true if this region has split and we have daughter regions - * regions that may or may not hold references to this region. - * @param regionid Region id to use. 
- * @throws IllegalArgumentException - */ - public HRegionInfo(HTableDescriptor tableDesc, final byte [] startKey, - final byte [] endKey, final boolean split, final long regionid, - final Map> peers, - final Map favoredNodesMap) - throws IllegalArgumentException { - super(); - if (tableDesc == null) { - throw new IllegalArgumentException("tableDesc cannot be null"); - } - this.offLine = false; - this.regionId = regionid; - this.regionName = createRegionName(tableDesc.getName(), startKey, regionId, - !tableDesc.isMetaRegion()); - this.regionNameStr = Bytes.toStringBinary(this.regionName); - this.split = split; - this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone(); - this.startKey = startKey == null? - HConstants.EMPTY_START_ROW: startKey.clone(); - this.tableDesc = tableDesc; - this.quorumInfo = new QuorumInfo(peers, getEncodedName()); - this.favoredNodesMap = favoredNodesMap == null ? - new HashMap() : favoredNodesMap; - setHashCode(); - } - - /** - * Costruct a copy of another HRegionInfo - * - * @param other - */ - public HRegionInfo(HRegionInfo other) { - super(); - this.endKey = other.getEndKey(); - this.offLine = other.isOffline(); - this.regionId = other.getRegionId(); - this.regionName = other.getRegionName(); - this.regionNameStr = Bytes.toStringBinary(this.regionName); - this.split = other.isSplit(); - this.startKey = other.getStartKey(); - this.tableDesc = other.getTableDesc(); - this.hashCode = other.hashCode(); - this.encodedName = other.getEncodedName(); - this.quorumInfo = other.quorumInfo; - this.favoredNodesMap = other.favoredNodesMap; - } - - private static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final long regionid, boolean newFormat) { - return createRegionName(tableName, startKey, Long.toString(regionid), newFormat); - } - - /** - * Make a region name of passed parameters. - * @param tableName - * @param startKey Can be null - * @param id Region id. - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). - * @return Region name made of passed tableName, startKey and id - */ - public static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final String id, boolean newFormat) { - return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat); - } - /** - * Make a region name of passed parameters. - * @param tableName - * @param startKey Can be null - * @param id Region id - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). - * @return Region name made of passed tableName, startKey and id - */ - public static byte [] createRegionName(final byte [] tableName, - final byte [] startKey, final byte [] id, boolean newFormat) { - byte [] b = new byte [tableName.length + 2 + id.length + - (startKey == null? 0: startKey.length) + - (newFormat ? (MD5_HEX_LENGTH + 2) : 0)]; - - int offset = tableName.length; - System.arraycopy(tableName, 0, b, 0, offset); - b[offset++] = DELIMITER; - if (startKey != null && startKey.length > 0) { - System.arraycopy(startKey, 0, b, offset, startKey.length); - offset += startKey.length; - } - b[offset++] = DELIMITER; - System.arraycopy(id, 0, b, offset, id.length); - offset += id.length; - - if (newFormat) { - // - // Encoded name should be built into the region name. - // - // Use the region name thus far (namely, ,,) - // to compute a MD5 hash to be used as the encoded name, and append - // it to the byte buffer. 
- // - String md5Hash = MD5Hash.getMD5AsHex(b, 0, offset); - byte [] md5HashBytes = Bytes.toBytes(md5Hash); - - if (md5HashBytes.length != MD5_HEX_LENGTH) { - LOG.error("MD5-hash length mismatch: Expected=" + MD5_HEX_LENGTH + - "; Got=" + md5HashBytes.length); - } - - // now append the bytes '..' to the end - b[offset++] = ENC_SEPARATOR; - System.arraycopy(md5HashBytes, 0, b, offset, MD5_HEX_LENGTH); - offset += MD5_HEX_LENGTH; - b[offset++] = ENC_SEPARATOR; - } - - return b; - } - - /** - * Separate elements of a regionName. - * @param regionName - * @return Array of byte[] containing tableName, startKey and id - * @throws IOException - */ - public static byte [][] parseRegionName(final byte [] regionName) - throws IOException { - int offset = -1; - for (int i = 0; i < regionName.length; i++) { - if (regionName[i] == DELIMITER) { - offset = i; - break; - } - } - if(offset == -1) { - throw new IOException("Invalid regionName format: " + - Bytes.toStringBinary(regionName)); - } - byte [] tableName = new byte[offset]; - System.arraycopy(regionName, 0, tableName, 0, offset); - offset = -1; - for (int i = regionName.length - 1; i > 0; i--) { - if(regionName[i] == DELIMITER) { - offset = i; - break; - } - } - if(offset == -1) { - throw new IOException("Invalid regionName format: " + - Bytes.toStringBinary(regionName)); - } - byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; - if(offset != tableName.length + 1) { - startKey = new byte[offset - tableName.length - 1]; - System.arraycopy(regionName, tableName.length + 1, startKey, 0, - offset - tableName.length - 1); - } - byte [] id = new byte[regionName.length - offset - 1]; - System.arraycopy(regionName, offset + 1, id, 0, - regionName.length - offset - 1); - byte [][] elements = new byte[3][]; - elements[0] = tableName; - elements[1] = startKey; - elements[2] = id; - return elements; - } - - /** @return the regionId */ - public long getRegionId(){ - return regionId; - } - - /** - * @return the regionName as an array of bytes. - * @see #getRegionNameAsString() - */ - public byte [] getRegionName(){ - return regionName; - } - - /** - * @return Region name as a String for use in logging, etc. - */ - public String getRegionNameAsString() { - if (hasEncodedName(this.regionName)) { - // new format region names already have their encoded name. - return this.regionNameStr; - } - - // old format. regionNameStr doesn't have the region name. - // - // - return this.regionNameStr + "." + this.getEncodedName(); - } - - /** @return the encoded region name */ - public synchronized String getEncodedName() { - if (this.encodedName == NO_HASH) { - this.encodedName = encodeRegionName(this.regionName); - } - return this.encodedName; - } - - /** @return the startKey */ - public byte [] getStartKey(){ - return startKey; - } - - /** @return the endKey */ - public byte [] getEndKey(){ - return endKey; - } - - /** - * Returns true if the given inclusive range of rows is fully contained - * by this region. For example, if the region is foo,a,g and this is - * passed ["b","c"] or ["a","c"] it will return true, but if this is passed - * ["b","z"] it will return false. 
- * @throws IllegalArgumentException if the range passed is invalid (ie end < start) - */ - public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { - if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) { - throw new IllegalArgumentException( - "Invalid range: " + Bytes.toStringBinary(rangeStartKey) + - " > " + Bytes.toStringBinary(rangeEndKey)); - } - - boolean firstKeyInRange = Bytes.compareTo(rangeStartKey, startKey) >= 0; - boolean lastKeyInRange = - Bytes.compareTo(rangeEndKey, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); - return firstKeyInRange && lastKeyInRange; - } - - /** - * Return true if the given row falls in this region. - */ - public boolean containsRow(byte[] row) { - return Bytes.compareTo(row, startKey) >= 0 && - (Bytes.compareTo(row, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); - } - - /** @return the tableDesc */ - public HTableDescriptor getTableDesc(){ - return tableDesc; - } - - /** - * @param newDesc new table descriptor to use - */ - public void setTableDesc(HTableDescriptor newDesc) { - this.tableDesc = newDesc; - } - - /** @return true if this is the root region */ - public boolean isRootRegion() { - return this.tableDesc.isRootRegion(); - } - - /** @return true if this is the meta table */ - public boolean isMetaTable() { - return this.tableDesc.isMetaTable(); - } - - /** @return true if this region is a meta region */ - public boolean isMetaRegion() { - return this.tableDesc.isMetaRegion(); - } - - /** - * @return True if has been split and has daughters. - */ - public boolean isSplit() { - return this.split; - } - - /** - * @param split set split status - */ - public void setSplit(boolean split) { - this.split = split; - } - - /** - * @return point to explicitly split the region on - */ - public byte[] getSplitPoint() { - return (this.splitPoint != null && this.splitPoint.length > 0) - ? this.splitPoint : null; - } - - /** - * @param splitPoint set split status & position to split on - */ - public void setSplitPoint(byte[] splitPoint) { - this.split = true; - this.splitPoint = splitPoint; - } - - /** - * @return True if this region is offline. - */ - public boolean isOffline() { - return this.offLine; - } - - /** - * @param offLine set online - offline status - */ - public void setOffline(boolean offLine) { - this.offLine = offLine; - } - - /** - * @see java.lang.Object#toString() - */ - @Override - public String toString() { - return String.format("REGION => {%s => '%s', STARTKEY => '%s', " + - "ENDKEY => '%s', ENCODED => %s, OFFLINE => %s, SPLIT => %s, " + - "TABLE => {%s}, FAVORED_NODES_MAP => {%s}}", - HConstants.NAME, regionNameStr, Bytes.toStringBinary(startKey), - Bytes.toStringBinary(endKey), getEncodedName(), isOffline(), - isSplit(), tableDesc.toString(), - favoredNodesMap != null ? prettyPrintFavoredNodesMap() : ""); - } - - /** - * @see java.lang.Object#equals(java.lang.Object) - * - * TODO (arjen): this does not consider split and split point! 
- */ - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null) { - return false; - } - if (!(o instanceof HRegionInfo)) { - return false; - } - - HRegionInfo that = (HRegionInfo)o; - if (this.compareTo(that) != 0) { - return false; - } - - if (this.quorumInfo == null && that.quorumInfo != null) { - return false; - } - if (this.quorumInfo != null && !this.quorumInfo.equals(that.quorumInfo)) { - return false; - } - - return hasSameFavoredNodesMap(that); - } - - /** - * @see java.lang.Object#hashCode() - */ - @Override - public int hashCode() { - return this.hashCode; - } - - /** @return the object version number */ - @Override - public byte getVersion() { - return VERSION; - } - - // - // Writable - // - - @Override - public void write(DataOutput out) throws IOException { - super.write(out); - Bytes.writeByteArray(out, endKey); - out.writeBoolean(offLine); - out.writeLong(regionId); - Bytes.writeByteArray(out, regionName); - out.writeBoolean(split); - if (split) { - Bytes.writeByteArray(out, splitPoint); - } - Bytes.writeByteArray(out, startKey); - tableDesc.write(out); - out.writeInt(hashCode); - } - - @Override - public void readFields(DataInput in) throws IOException { - super.readFields(in); - this.endKey = Bytes.readByteArray(in); - this.offLine = in.readBoolean(); - this.regionId = in.readLong(); - this.regionName = Bytes.readByteArray(in); - this.regionNameStr = Bytes.toStringBinary(this.regionName); - this.split = in.readBoolean(); - if (this.split) { - this.splitPoint = Bytes.readByteArray(in); - } - this.startKey = Bytes.readByteArray(in); - this.tableDesc.readFields(in); - this.hashCode = in.readInt(); - if (quorumInfo == null) { - quorumInfo = new QuorumInfo( - new HashMap>(), - HRegionInfo.encodeRegionName(regionName)); - } - } - - // - // Comparable - // - - public int compareTo(HRegionInfo o) { - if (o == null) { - return 1; - } - - // Are regions of same table? - int result = this.tableDesc.compareTo(o.tableDesc); - if (result != 0) { - return result; - } - - // Compare start keys. - result = Bytes.compareTo(this.startKey, o.startKey); - if (result != 0) { - return result; - } - - // Compare end keys. - return Bytes.compareTo(this.endKey, o.endKey); - } - - /** - * @return Comparator to use comparing {@link KeyValue}s. - */ - public KVComparator getComparator() { - return isRootRegion()? KeyValue.ROOT_COMPARATOR: isMetaRegion()? - KeyValue.META_COMPARATOR: KeyValue.COMPARATOR; - } - - public Map getPeersWithRank() { - return getQuorumInfo().getPeersWithRank(); - } - - public Map getPeersWithCluster() { - return getQuorumInfo().getPeersWithCluster(); - } - - @Deprecated - public InetSocketAddress[] getFavoredNodes() { - return getFavoredNodes(LOCAL_DC_KEY); - } - - public InetSocketAddress[] getFavoredNodes(String dcKey) { - return this.favoredNodesMap != null? 
- this.favoredNodesMap.get(dcKey): - null; - } - - @Deprecated - public void setFavoredNodes(InetSocketAddress[] favoredNodes) { - setFavoredNodes(LOCAL_DC_KEY, favoredNodes); - } - - public void setFavoredNodes(String dcName, InetSocketAddress[] favoredNodes) { - if (this.favoredNodesMap == null) { - this.favoredNodesMap = new HashMap<>(); - } - this.favoredNodesMap.put(dcName, favoredNodes); - setHashCode(); - } - - public void setPeers(Map> peers) { - this.quorumInfo.setPeers(peers); - } - - public Map> getPeers() { - QuorumInfo quorumInfo = getQuorumInfo(); - if (quorumInfo != null) { - return quorumInfo.getPeers(); - } - return null; - } - - public Map getFavoredNodesMap() { - return favoredNodesMap; - } - - public void setFavoredNodesMap( - final Map favoredNodesMap) { - this.favoredNodesMap = favoredNodesMap; - } - - public boolean hasSameFavoredNodesMap(final HRegionInfo that) { - if (that == null) { - return false; - } - - if (!this.favoredNodesMap.keySet().equals(that.favoredNodesMap.keySet())) { - return false; - } - - for (String domain : this.favoredNodesMap.keySet()) { - if (!Arrays.equals(this.favoredNodesMap.get(domain), - that.favoredNodesMap.get(domain))) { - return false; - } - } - return true; - } - - public QuorumInfo getQuorumInfo() { - return quorumInfo; - } - - public void setQuorumInfo(final QuorumInfo quorumInfo) { - this.quorumInfo = quorumInfo; - } - - public String prettyPrintFavoredNodesMap() { - if (favoredNodesMap == null) { - return ""; - } - StringBuilder sb = new StringBuilder(128); - Iterator> it - = favoredNodesMap.entrySet().iterator(); - while (it.hasNext()) { - Map.Entry domain = it.next(); - InetSocketAddress[] favoredNodes = domain.getValue(); - sb.append(domain.getKey()); - sb.append(" => ["); - if (favoredNodes != null) { - sb.append(Joiner.on(", ").join(favoredNodes)); - } - sb.append(it.hasNext() ? 
"], " : "]"); - } - return sb.toString(); - } - - public static class MultiDCHRegionInfo extends HRegionInfo { - private Map> combinedPeersMap; - - public MultiDCHRegionInfo(String dcsite, HRegionInfo regionInfo) { - super(regionInfo); - this.favoredNodesMap = new HashMap<>(); - this.favoredNodesMap.put(dcsite, regionInfo.getFavoredNodes()); - this.combinedPeersMap = regionInfo.getPeers(); - } - - public void merge(String otherDC, HRegionInfo other) { - this.favoredNodesMap.put(otherDC, other.getFavoredNodes()); - } - - public void validate(int quorumSize, Map maxPeersPerDC) - throws IllegalArgument { - if (favoredNodesMap.size() == 0) { - return; - } - - int rankNum = quorumSize; - for (String cluster : maxPeersPerDC.keySet()) { - int numPeerAssignedPerDC = maxPeersPerDC.get(cluster).intValue(); - if (combinedPeersMap.get(cluster) == null) { - combinedPeersMap.put(cluster, new HashMap - ()); - } - InetSocketAddress[] peerAddr = favoredNodesMap.get(cluster); - for (InetSocketAddress addr : peerAddr) { - this.combinedPeersMap.get(cluster).put(new HServerAddress(addr), rankNum--); - if (--numPeerAssignedPerDC == 0) { - break; - } - } - if (rankNum <= 0) { - break; - } - } - - if (rankNum > 0) { - throw new IllegalArgument("Not enough nodes to complete the peer" + - " the peer assignment."); - } - } - - @Override - public Map> getPeers() { - return combinedPeersMap; - } - } -} diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 8a9dce6..5a55683 100644 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -42,8 +42,6 @@ import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.io.RawComparator; import org.apache.hadoop.io.Writable; -import com.google.common.primitives.Longs; - /** * An HBase Key/Value. * @@ -82,6 +80,7 @@ public final class KeyValue implements Writable, HeapSize, Cloneable { * Colon character in UTF-8 */ public static final char COLUMN_FAMILY_DELIMITER = ':'; + public static final int DELIMITER = ','; public static final byte[] COLUMN_FAMILY_DELIM_ARRAY = new byte[]{COLUMN_FAMILY_DELIMITER}; @@ -1931,11 +1930,11 @@ public final class KeyValue implements Writable, HeapSize, Cloneable { int lmetaOffsetPlusDelimiter = loffset + metalength; int leftFarDelimiter = getDelimiterInReverse(left, lmetaOffsetPlusDelimiter, - llength - metalength, HRegionInfo.DELIMITER); + llength - metalength, DELIMITER); int rmetaOffsetPlusDelimiter = roffset + metalength; int rightFarDelimiter = getDelimiterInReverse(right, rmetaOffsetPlusDelimiter, rlength - metalength, - HRegionInfo.DELIMITER); + DELIMITER); if (leftFarDelimiter < 0 && rightFarDelimiter >= 0) { // Nothing between .META. and regionid. Its first key. return -1; @@ -1986,9 +1985,9 @@ public final class KeyValue implements Writable, HeapSize, Cloneable { // LOG.info("META " + Bytes.toString(left, loffset, llength) + // "---" + Bytes.toString(right, roffset, rlength)); int leftDelimiter = getDelimiter(left, loffset, llength, - HRegionInfo.DELIMITER); + DELIMITER); int rightDelimiter = getDelimiter(right, roffset, rlength, - HRegionInfo.DELIMITER); + DELIMITER); if (leftDelimiter < 0 && rightDelimiter >= 0) { // Nothing between .META. and regionid. Its first key. 
return -1; @@ -2008,10 +2007,10 @@ public final class KeyValue implements Writable, HeapSize, Cloneable { leftDelimiter++; rightDelimiter++; int leftFarDelimiter = getRequiredDelimiterInReverse(left, leftDelimiter, - llength - (leftDelimiter - loffset), HRegionInfo.DELIMITER); + llength - (leftDelimiter - loffset), DELIMITER); int rightFarDelimiter = getRequiredDelimiterInReverse(right, rightDelimiter, rlength - (rightDelimiter - roffset), - HRegionInfo.DELIMITER); + DELIMITER); // Now compare middlesection of row. result = super.compareRows(left, leftDelimiter, leftFarDelimiter - leftDelimiter, right, rightDelimiter, diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/client/QuorumClient.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/client/QuorumClient.java index 61c042d..2901a54 100644 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/client/QuorumClient.java +++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/client/QuorumClient.java @@ -69,7 +69,7 @@ public class QuorumClient { protected QuorumClient(String regionId, final Configuration conf, ExecutorService pool) throws IOException { - this(RaftUtil.createDummyRegionInfo(regionId).getQuorumInfo(), conf, pool); + this(RaftUtil.createDummyQuorumInfo(regionId), conf, pool); } public synchronized long replicateCommits(List txns) diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/quorum/QuorumInfo.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/quorum/QuorumInfo.java index 66e5406..8755cbb 100644 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/quorum/QuorumInfo.java +++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/quorum/QuorumInfo.java @@ -14,6 +14,9 @@ public class QuorumInfo { + Bytes.SIZEOF_BYTE // Payload type + Bytes.SIZEOF_BYTE; // Payload version + // For compatability with non-hydrabase mode + public static String LOCAL_DC_KEY = "LOCAL_DC_KEY_FOR_NON_HYDRABASE_MODE"; + private Map> peers = null; private Map peersWithRank = null; private Set peersAsString = null; diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/GetHydraBaseRegionInfoUtil.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/GetHydraBaseRegionInfoUtil.java deleted file mode 100644 index 1dd4b03..0000000 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/GetHydraBaseRegionInfoUtil.java +++ /dev/null @@ -1,67 +0,0 @@ -package org.apache.hadoop.hbase.consensus.rmap; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.log4j.Level; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.URI; -import java.util.HashMap; -import java.util.Map; -import java.util.List; - -/** - * Takes comma-separated list of (full/partial) region-names and output the - * required information about that region - */ -public class GetHydraBaseRegionInfoUtil { - private static Logger LOG = LoggerFactory.getLogger( - GetHydraBaseRegionInfoUtil.class); - - public static void main(String[] args) throws IOException, RMapException { - - // Silent the noisy o/p - org.apache.log4j.Logger.getLogger( - "org.apache.zookeeper").setLevel(Level.ERROR); - org.apache.log4j.Logger.getLogger( - 
"org.apache.hadoop.conf.ClientConfigurationUtil").setLevel(Level.ERROR); - org.apache.log4j.Logger.getLogger( - "org.apache.hadoop.fs").setLevel(Level.ERROR); - org.apache.log4j.Logger.getLogger( - "org.apache.hadoop.util.NativeCodeLoader").setLevel(Level.ERROR); - - String[] regions = args[0].split(","); - Configuration conf = HBaseConfiguration.create(); - RMapConfiguration rMapConfiguration = new RMapConfiguration(conf); - - Map regionInfoMap = new HashMap<>(); - List regionInfoList; - - URI uri = rMapConfiguration.getRMapSubscription(conf); - if (uri != null) { - rMapConfiguration.readRMap(uri); - regionInfoList = rMapConfiguration.getRegions(uri); - for (HRegionInfo r : regionInfoList) { - regionInfoMap.put(r.getEncodedName(), r); - } - } - - HRegionInfo region; - for (String regionName : regions) { - if ((region = regionInfoMap.get(regionName)) != null) { - LOG.info(String.format("%s:[table: %s, start_key: %s, " + - "end_key: %s, peers: %s]", regionName, - region.getTableDesc().getNameAsString(), - Bytes.toStringBinary(region.getStartKey()), - Bytes.toStringBinary(region.getEndKey()), - region.getQuorumInfo().getPeersAsString())); - } else { - LOG.error("No region found with encoded name " + regionName); - } - } - } -} diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/HDFSReader.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/HDFSReader.java deleted file mode 100644 index 7d6b0f7..0000000 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/HDFSReader.java +++ /dev/null @@ -1,140 +0,0 @@ -package org.apache.hadoop.hbase.consensus.rmap; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.util.Bytes; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -public class HDFSReader extends RMapReader { - protected static final Logger LOG = LoggerFactory.getLogger(HDFSReader.class); - - private Configuration conf; - - public HDFSReader(final Configuration conf) { - this.conf = conf; - } - - @Override - public List getVersions(URI uri) throws IOException { - Path path = new Path(getSchemeAndPath(uri)); - FileSystem fs = path.getFileSystem(conf); - FileStatus[] statuses = fs.globStatus(new Path(path.toString() + ".*")); - - List versions = new ArrayList<>(statuses.length); - for (FileStatus status : statuses) { - long version = getVersionFromPath(status.getPath().toString()); - if (version > 0) { - versions.add(version); - } - } - Collections.sort(versions); - return versions; - } - - @Override - public URI resolveSymbolicVersion(URI uri) throws URISyntaxException { - long version = getVersion(uri); - String schemeAndPath = getSchemeAndPath(uri); - - if (version == RMapReader.CURRENT || version == RMapReader.NEXT) { - Path link = new Path(String.format("%s.%s", schemeAndPath, - version == RMapReader.CURRENT ? "CURRENT" : "NEXT")); - // Resolve to an explicit version, or UNKNOWN - try { - Path target = getLinkTarget(link); - version = target != null ? 
getVersionFromPath(target.toString()) : - RMapReader.UNKNOWN; - } catch (IOException e) { - LOG.error("Failed to look up version from link:", e); - version = RMapReader.UNKNOWN; - } - } - - if (version > 0) { - return new URI(String.format("%s?version=%d", schemeAndPath, version)); - } - return new URI(schemeAndPath); - } - - @Override - public String readRMapAsString(final URI uri) throws IOException { - // Get file status, throws IOException if the path does not exist. - Path path = getPathWithVersion(uri); - FileSystem fs = path.getFileSystem(conf); - FileStatus status = fs.getFileStatus(path); - - long n = status.getLen(); - if (n < 0 || n > MAX_SIZE_BYTES) { - throw new IOException(String.format("Invalid RMap file size " + - "(expected between 0 and %d but got %d bytes)", - MAX_SIZE_BYTES, n)); - } - - byte[] buf = new byte[(int)n]; - FSDataInputStream stream = fs.open(path); - try { - stream.readFully(buf); - } finally { - stream.close(); - } - return Bytes.toString(buf); - } - - public Path getPathWithVersion(final URI uri) throws IOException { - long version = RMapReader.UNKNOWN; - try { - version = getVersion(resolveSymbolicVersion(uri)); - } catch (URISyntaxException e) { - // Ignore invalid URIs and assume version UNKNOWN - } - - if (version > 0) { - return new Path(String.format("%s.%d", getSchemeAndPath(uri), version)); - } - return new Path(uri.toString()); - } - - private long getVersionFromPath(final String path) { - String[] tokens = path.split("[\\.]"); - try { - return Long.parseLong(tokens[tokens.length - 1]); - } catch (NumberFormatException e) { - // Skip if token not numerical - } - return RMapReader.UNKNOWN; - } - - private Path getLinkTarget(final Path path) throws IOException { - FileSystem fs = path.getFileSystem(conf); - - // The getHardLinkedFiles call is a bit tricky, as it effectively returns - // all other paths to the inode shared with the given path. In order to - // guard against erroneous links, only consider those where the paths - // are the same, up to the version. - String pathWithoutVersion = path.toString().substring(0, - path.toString().lastIndexOf('.')); - /* -TODO: FIXME: Amit: this code works with the internal hdfs. might not work with the -OSS version. 
- - for (String link : fs.getHardLinkedFiles(path)) { - if (path.toString().startsWith(pathWithoutVersion) && - getVersionFromPath(link) > 0) { - return new Path(link); - } - } - */ - return null; - } -} diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/LocalReader.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/LocalReader.java deleted file mode 100644 index fc1e877..0000000 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/LocalReader.java +++ /dev/null @@ -1,96 +0,0 @@ -package org.apache.hadoop.hbase.consensus.rmap; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -public class LocalReader extends RMapReader { - protected static final Logger LOG = LoggerFactory.getLogger( - LocalReader.class); - - @Override - public List getVersions(final URI uri) throws IOException { - Path path = Paths.get(uri); - List versions = new ArrayList<>(); - - for (Path match : Files.newDirectoryStream(path.getParent(), - path.getFileName() + ".*")) { - long version = getVersionFromPath(match.toString()); - if (version > 0) { - versions.add(version); - } - } - Collections.sort(versions); - return versions; - } - - @Override - public URI resolveSymbolicVersion(URI uri) throws URISyntaxException { - long version = getVersion(uri); - String schemeAndPath = getSchemeAndPath(uri); - - if (version == RMapReader.CURRENT || version == RMapReader.NEXT) { - Path link = Paths.get(String.format("%s.%s", schemeAndPath, - version == RMapReader.CURRENT ? 
"CURRENT" : "NEXT")); - // Resolve to an explicit version, or UNKNOWN - try { - version = getVersionFromPath(Files.readSymbolicLink(link).toString()); - } catch (IOException e) { - LOG.error("Failed to look up version from link:", e); - version = RMapReader.UNKNOWN; - } - } - - if (version > 0) { - return new URI(String.format("%s?version=%d", schemeAndPath, version)); - } - return new URI(schemeAndPath); - } - - @Override - public String readRMapAsString(final URI uri) throws IOException { - Path path = getPathWithVersion(uri); - - long n = Files.size(path); - if (n < 0 || n > MAX_SIZE_BYTES) { - throw new IOException(String.format("Invalid RMap file size " + - "(expected between 0 and %d but got %d bytes)", - MAX_SIZE_BYTES, n)); - } - - return new String(Files.readAllBytes(path)); - } - - private long getVersionFromPath(final String path) { - String[] tokens = path.split("[\\.]"); - try { - return Long.parseLong(tokens[tokens.length - 1]); - } catch (NumberFormatException e) { - // Skip if token not numerical - } - return RMapReader.UNKNOWN; - } - - private Path getPathWithVersion(final URI uri) { - long version = RMapReader.UNKNOWN; - try { - version = getVersion(resolveSymbolicVersion(uri)); - } catch (URISyntaxException e) { - // Ignore invalid URIs and assume version UNKNOWN - } - - if (version > 0) { - return Paths.get(String.format("%s.%d", uri.getPath(), version)); - } - return Paths.get(uri); - } -} diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/NoSuchRMapException.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/NoSuchRMapException.java deleted file mode 100644 index 6136063..0000000 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/NoSuchRMapException.java +++ /dev/null @@ -1,10 +0,0 @@ -package org.apache.hadoop.hbase.consensus.rmap; - -import java.io.IOException; -import java.net.URI; - -public class NoSuchRMapException extends IOException { - public NoSuchRMapException(final URI uri) { - super("No RMap found with URI " + uri); - } -} diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java deleted file mode 100644 index f345b1a..0000000 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java +++ /dev/null @@ -1,146 +0,0 @@ -package org.apache.hadoop.hbase.consensus.rmap; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.util.Bytes; -import org.json.JSONArray; -import org.json.JSONException; -import org.json.JSONObject; - -import java.net.InetSocketAddress; -import java.util.*; - -public class Parser { - private Configuration conf; - - public Parser(final Configuration conf) { - this.conf = conf; - } - - public List parseEncodedRMap(JSONObject encodedRMap) - throws JSONException { - List regions = new ArrayList<>(); - JSONObject tables = encodedRMap.getJSONObject("tables"); - - for (Iterator names = tables.keys(); names.hasNext();) { - String name = names.next(); - regions.addAll(parseTable(name, tables.getJSONObject(name))); - } - - return regions; - } - - public List parseTable(String name, JSONObject table) - throws JSONException { - HTableDescriptor tableDesc = new HTableDescriptor(name); - List regions = 
Collections.emptyList(); - Iterator keys = table.keys(); - while (keys.hasNext()) { - String key = keys.next(); - if (key.equals("families")) { - JSONObject families = table.getJSONObject(key); - Iterator familyKeys = families.keys(); - while (familyKeys.hasNext()) { - String familyName = familyKeys.next(); - JSONObject familyJson = families.getJSONObject(familyName); - tableDesc.addFamily(parseFamily(familyName, familyJson)); - } - } else if (key.equals("regions")) { - JSONArray regionsJson = table.getJSONArray(key); - int length = regionsJson.length(); - regions = new ArrayList<>(length); - for (int i = 0; i < length; ++i) { - regions.add(parseRegion(tableDesc, regionsJson.getJSONObject(i))); - } - } else { - String value = table.get(key).toString(); - tableDesc.setValue(key, value); - } - } - return regions; - } - - public HColumnDescriptor parseFamily(String name, JSONObject family) - throws JSONException { - HColumnDescriptor columnDesc = new HColumnDescriptor(); - columnDesc.setName(Bytes.toBytes(name)); - Iterator keys = family.keys(); - while (keys.hasNext()) { - String key = keys.next(); - String value = family.get(key).toString(); - columnDesc.setValue(key, value); - } - return columnDesc; - } - - public HRegionInfo parseRegion(HTableDescriptor table, JSONObject region) - throws JSONException { - long id = region.getLong("id"); - byte[] startKey = Bytes.toBytes(region.getString("start_key")); - byte[] endKey = Bytes.toBytes(region.getString("end_key")); - Map> peers = parsePeers(region - .getJSONObject("peers")); - Map favoredNodesMap = parseFavoredNodesMap(region - .getJSONObject("favored_nodes")); - return new HRegionInfo(table, startKey, endKey, false, id, peers, - favoredNodesMap); - } - - public Map> parsePeers(JSONObject peersJson) - throws JSONException { - Map> peers = new LinkedHashMap<>(); - Iterator keys = peersJson.keys(); - while (keys.hasNext()) { - String cellName = keys.next(); - JSONArray peersWithRank = peersJson.getJSONArray(cellName); - peers.put(cellName, parsePeersWithRank(peersWithRank)); - } - return peers; - } - - public Map parsePeersWithRank(JSONArray peersJson) - throws JSONException { - Map peers = new LinkedHashMap(); - for (int i = 0; i < peersJson.length(); ++i) { - String peer = peersJson.getString(i); - int colonIndex = peer.lastIndexOf(':'); - peers.put(new HServerAddress(peer.substring(0, colonIndex)), - Integer.valueOf(peer.substring(colonIndex + 1))); - } - return peers; - } - - Map parseFavoredNodesMap(JSONObject favoredNodesJson) - throws JSONException { - Iterator keys = favoredNodesJson.keys(); - - HashMap favoredNodesMap = new HashMap<>(); - while (keys.hasNext()) { - String cellName = keys.next(); - JSONArray peersWithRank = favoredNodesJson.getJSONArray(cellName); - favoredNodesMap.put(cellName, parseFavoredNodes(peersWithRank)); - } - return favoredNodesMap; - } - - public InetSocketAddress[] parseFavoredNodes(JSONArray favoredNodesInCell) - throws JSONException { - if (favoredNodesInCell == null) { - return null; - } else { - int length = favoredNodesInCell.length(); - InetSocketAddress[] favoredNodes = new InetSocketAddress[length]; - for (int i = 0; i < length; ++i) { - String node = favoredNodesInCell.getString(i); - int colonIndex = node.lastIndexOf(':'); - favoredNodes[i] = new InetSocketAddress(node.substring(0, colonIndex), - Integer.parseInt(node.substring(colonIndex + 1))); - - } - return favoredNodes; - } - } -} diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapConfiguration.java 
b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapConfiguration.java deleted file mode 100644 index 00306dc..0000000 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapConfiguration.java +++ /dev/null @@ -1,330 +0,0 @@ -package org.apache.hadoop.hbase.consensus.rmap; - -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.CommandLineParser; -import org.apache.commons.cli.HelpFormatter; -import org.apache.commons.cli.Options; -import org.apache.commons.cli.ParseException; -import org.apache.commons.cli.PosixParser; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; -import org.json.JSONException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeSet; - -public class RMapConfiguration { - private static final Logger LOG = LoggerFactory.getLogger(RMapConfiguration.class); - - private Configuration conf; - - private Map appliedRMaps; - private Map cachedRMaps; - - public RMapConfiguration(final Configuration conf) { - this.conf = conf; - this.appliedRMaps = new HashMap<>(); - this.cachedRMaps = new HashMap<>(); - } - - public static URI getRMapSubscription(final Configuration conf) { - String[] subscriptionsList = - conf.get(HConstants.RMAP_SUBSCRIPTION, "").split(","); - if (subscriptionsList.length >= 1) { - if (subscriptionsList.length > 1) { - LOG.warn(String.format("We do not support multiple RMaps. " + - "Using the first RMap as the correct one: %s", subscriptionsList[0])); - } - else if (!subscriptionsList[0].equals("")) { - try { - return new URI(subscriptionsList[0]); - } catch (URISyntaxException e) { - LOG.warn(String.format("Failed to parse URI for subscription %s: ", - subscriptionsList[0]), e); - } - } - } - return null; - } - - public static RMapReader getRMapReader(final Configuration conf, - final URI uri) throws RMapException { - switch (uri.getScheme()) { - case "file": - return new LocalReader(); - case "hdfs": - return new HDFSReader(conf); - default: - throw new RMapException("No reader found for RMap: " + uri); - } - } - - public synchronized RMap getRMap(URI uri) - throws IOException, RMapException { - return getRMap(uri, false); - } - - public synchronized RMap getRMap(URI uri, boolean reload) - throws IOException, RMapException { - try { - RMapReader reader = getRMapReader(conf, uri); - URI nonSymbolicURI = reader.resolveSymbolicVersion(uri); - // Try to get a cached instance of the RMap. - RMap rmap = cachedRMaps.get(nonSymbolicURI); - if (reload || rmap == null) { - // No cached instance was found, read it using the reader. 
- RMapJSON encodedRMap = reader.readRMap(nonSymbolicURI); - rmap = new RMap(encodedRMap.uri, - new Parser(conf).parseEncodedRMap(encodedRMap.getEncodedRMap()), - encodedRMap.signature); - cachedRMaps.put(rmap.uri, rmap); - } - return rmap; - } catch (URISyntaxException e) { - throw new RMapException("URI syntax invalid for RMap: " + uri, e); - } catch (JSONException e) { - throw new RMapException("Failed to decode JSON for RMap: " + uri, e); - } - } - - /** - * Reads and caches the RMap from the given URI and returns its signature. - * - * @param uri - * @return - */ - public synchronized String readRMap(final URI uri) throws IOException, - RMapException { - return getRMap(uri).signature; - } - - public synchronized String readRMap(URI uri, boolean reload) - throws IOException, RMapException { - return getRMap(uri, reload).signature; - } - - /** - * Get the list of regions which need to be updated in order to transition to - * this version of the RMap, given by the URI. - * - * @param uri of the RMap - * @return a list of regions - */ - public synchronized Collection<HRegionInfo> getTransitionDelta(final URI uri) - throws IOException, RMapException { - RMap nextRMap = getRMap(uri); - RMap currentRMap = appliedRMaps.get(RMapReader.getSchemeAndPath(uri)); - - // The standard Set implementations seem to be using compareTo() for their - // operations. On the HRegionInfo objects compareTo() and equals() have - // different properties where equals() is needed here. What follows is a - // poor man's Set comparison to determine which regions need to be modified - // to make the RMap transition. - if (nextRMap != null) { - HashMap<String, HRegionInfo> delta = new HashMap<>(); - for (HRegionInfo next : nextRMap.regions) { - delta.put(next.getEncodedName(), next); - } - - if (currentRMap != null) { - // Remove all regions already present in the current RMap from the - // delta. This should use the {@link HRegionInfo#equals} method as it - // should consider the favored nodes and replicas. - for (HRegionInfo current : currentRMap.regions) { - HRegionInfo next = delta.get(current.getEncodedName()); - if (next != null) { - if (next.equals(current)) { - delta.remove(next.getEncodedName()); - } - } - } - } - - return delta.values(); - } - - return Collections.emptyList(); - } - - public synchronized void appliedRMap(final URI uri) throws IOException, - RMapException { - RMap previous = appliedRMaps.put(RMapReader.getSchemeAndPath(uri), - getRMap(uri)); - // Purge the earlier version of the RMap from cache. - if (previous != null) { - cachedRMaps.remove(previous.uri); - } - } - - public synchronized boolean isRMapApplied(final URI uri) { - RMap active = appliedRMaps.get(RMapReader.getSchemeAndPath(uri)); - if (active != null) { - return active.uri.equals(uri); - } - return false; - } - - public synchronized RMap getAppliedRMap(String uri) { - return appliedRMaps.get(uri); - } - - public synchronized List<HRegionInfo> getRegions(final URI uri) - throws IOException, RMapException { - RMap rmap = getRMap(uri); - if (rmap == null) { - return Collections.emptyList(); - } - return Collections.unmodifiableList(rmap.regions); - } - - public synchronized void clearFromRMapCache(URI uri) { - cachedRMaps.remove(uri); - } - - /** - * Replace the content of cached RMap. For testing only!
- * - * @param uri - * @param rMap - */ - public synchronized void cacheCustomRMap(URI uri, RMap rMap) { - cachedRMaps.put(uri, rMap); - appliedRMaps.put(uri.toString(), rMap); - } - - public class RMap { - public final URI uri; - public final List<HRegionInfo> regions; - public final String signature; - - RMap(final URI uri, final List<HRegionInfo> regions, - final String signature) { - this.uri = uri; - this.regions = regions; - this.signature = signature; - } - - /** - * Return the quorum size in the RMap. - * @return - */ - public int getQuorumSize() { - if (regions.size() == 0) { - return 0; - } - return regions.get(0).getQuorumInfo().getQuorumSize(); - } - - /** - * Return the list of regions that are served by the specified server. - * @param hServerAddress - * @return - */ - public List<HRegionInfo> getRegionsForServer(HServerAddress hServerAddress) { - List<HRegionInfo> ret = new ArrayList<>(); - for (HRegionInfo region: regions) { - if (region.getPeersWithRank().containsKey(hServerAddress)) { - ret.add(region); - } - } - return ret; - } - - /** - * Returns the set of servers that are hosting any of the regions in the RMap. - * @return - */ - public Set<HServerAddress> getAllServers() { - Set<HServerAddress> ret = new HashSet<>(); - for (HRegionInfo region: regions) { - ret.addAll(region.getPeersWithRank().keySet()); - } - return ret; - } - - /** - * Create a customized RMap for test use only! - * - * @param uri - * @param regions - * @param signature - * @return - */ - public RMap createCustomizedRMap(URI uri, - List<HRegionInfo> regions, - String signature) { - return new RMapConfiguration.RMap( - uri == null ? this.uri : uri, - regions == null ? this.regions : regions, - signature == null ? this.signature : signature - ); - } - - @Override - public boolean equals(Object obj) { - if (obj == null || !(obj instanceof RMap)) { - return false; - } - RMap that = (RMap)obj; - if (this.regions == null || that.regions == null || this.regions.size() != that.regions.size()) { - return false; - } - Set<HRegionInfo> regionInfos = new TreeSet<>(); - regionInfos.addAll(regions); - for (HRegionInfo region : that.regions) { - if (!regionInfos.contains(region)) { - return false; - } - regionInfos.remove(region); - } - return regionInfos.isEmpty(); - } - } - - /** - * Creates a temporary name for an RMap, based on the date and time. - * @return - */ - public static String createRMapName() { - SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd-HHmmss"); - return "rmap.json." + format.format(System.currentTimeMillis()); - } - - /** - * View information about an RMap. Currently only prints its signature.
- * @param args - */ - public static void main(String[] args) throws ParseException, - URISyntaxException, RMapException, IOException { - Options options = new Options(); - options.addOption("r", "rmap", true, "Name of the rmap"); - - CommandLineParser parser = new PosixParser(); - CommandLine cmd = parser.parse(options, args); - - if (!cmd.hasOption("r")) { - System.out.println("Please specify the rmap with -r"); - return; - } - - String rmapUriStr = cmd.getOptionValue("r"); - RMapConfiguration conf = new RMapConfiguration(new Configuration()); - String rmapStr = conf.readRMap(new URI(rmapUriStr)); - LOG.debug("RMap Signature: " + rmapStr); - } -}
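
Note: an illustrative sketch, not part of the patch, of how the class above is driven programmatically; the file path is made up. readRMap() resolves the (possibly symbolic) version, parses and caches the RMap, and returns its MD5 signature:

    Configuration hbaseConf = new Configuration();
    RMapConfiguration rmapConf = new RMapConfiguration(hbaseConf);
    // "file" URIs are served by LocalReader, "hdfs" URIs by HDFSReader.
    String signature = rmapConf.readRMap(new URI("file:/tmp/rmap.json?version=CURRENT"));
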
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapException.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapException.java deleted file mode 100644 index 31621ab..0000000 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapException.java +++ /dev/null @@ -1,11 +0,0 @@ -package org.apache.hadoop.hbase.consensus.rmap; - -public class RMapException extends Exception { - public RMapException(final String message) { - super(message); - } - - public RMapException(final String message, final Throwable cause) { - super(message, cause); - } -}
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapJSON.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapJSON.java deleted file mode 100644 index 6d06123..0000000 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapJSON.java +++ /dev/null @@ -1,34 +0,0 @@ -package org.apache.hadoop.hbase.consensus.rmap; - -import org.json.JSONObject; - -import java.net.URI; - -public class RMapJSON { - final URI uri; - final JSONObject rmap; - final String signature; - - public RMapJSON(final URI uri, final JSONObject rmap, - final String signature) { - this.uri = uri; - this.rmap = rmap; - this.signature = signature; - } - - public long getVersion() { - return RMapReader.getVersion(uri); - } - - public URI getURI() { - return uri; - } - - public JSONObject getEncodedRMap() { - return rmap; - } - - public String getSignature() { - return signature; - } -}
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapReader.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapReader.java deleted file mode 100644 index dc81d34..0000000 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapReader.java +++ /dev/null @@ -1,205 +0,0 @@ -package org.apache.hadoop.hbase.consensus.rmap; - -import org.apache.commons.codec.binary.Hex; -//import org.apache.hadoop.hbase.thrift.generated.Hbase; -import org.apache.http.NameValuePair; -import org.apache.http.client.utils.URLEncodedUtils; -import org.json.JSONException; -import org.json.JSONObject; - -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.List; - -public abstract class RMapReader { - /** Max file size of a single file containing an RMap */ - public static long MAX_SIZE_BYTES = 16 * 1024 * 1024; // 16 MB - - /** RMap version special values */ - public static long NEXT = -2; - public static long CURRENT = -1; - public static long UNKNOWN = 0; - - /** - * Return a naturally sorted list of available versions of a given RMap URI. - * - * @param uri URI of the RMap - * @return a naturally sorted list of versions of the given RMap URI - * @throws IOException if an exception occurs while reading versions - */ - public abstract List<Long> getVersions(final URI uri) throws IOException; - - /** - * Resolve a URI containing a symbolic version into a URI with an absolute - * value which can be requested from the reader. - * - * @param uri URI containing a symbolic version - * @return a URI containing an absolute version - * @throws URISyntaxException if the given URI is malformed - */ - public abstract URI resolveSymbolicVersion(final URI uri) - throws URISyntaxException; - - /** - * Return the contents of the RMap at given URI as a string. - * - * @param uri URI of the RMap - * @return contents of the RMap as String - * @throws IOException if an exception occurs while reading the RMap - */ - public abstract String readRMapAsString(final URI uri) throws IOException; - - /** - * Return the version number of the RMap specified in the given URI. - * - * @param uri URI of the RMap - * @return the version number of the RMap or 0 if no version was found - */ - public static long getVersion(final URI uri) { - for (NameValuePair param : URLEncodedUtils.parse(uri, "UTF-8")) { - if (param.getName().equals("version")) { - switch (param.getValue().toUpperCase()) { - case "NEXT": - return NEXT; - case "CURRENT": - return CURRENT; - default: - try { - return Long.parseLong(param.getValue()); - } catch (NumberFormatException e) { - /* Ignore if NaN */ - } - } - } - } - return UNKNOWN; - } - - public static boolean isSymbolicVersion(final URI uri) { - return getVersion(uri) < 0; - } - - /** - * Read and return a {@link RMapJSON} of the RMap at the given URI. - * - * @param uri URI of the RMap - * @return a JSON representation of the RMap - * @throws IOException if a (possibly transient) exception occurs while - * reading the RMap - * @throws RMapException if any other exception occurs while reading the RMap - */ - public RMapJSON readRMap(final URI uri) throws IOException, RMapException { - URI nonSymbolicURI; - try { - nonSymbolicURI = resolveSymbolicVersion(uri); - String encodedRMap = readRMapAsString(nonSymbolicURI); - return new RMapJSON(nonSymbolicURI, new JSONObject(encodedRMap), - getSignature(encodedRMap)); - } catch (URISyntaxException e) { - throw new RMapException("URI syntax invalid for RMap: " + uri, e); - } catch (JSONException e) { - throw new RMapException( - "Failed to decode JSON string for RMap: " + uri, e); - } catch (NoSuchAlgorithmException e) { - throw new RMapException( - "Failed to generate signature for RMap: " + uri, e); - } - } - - /** - * Get an MD5 hash of the given string. - * - * @param s string to be hashed - * @return a hex String representation of the hash - * @throws NoSuchAlgorithmException if MD5 message digest is unavailable - */ - public static String getSignature(final String s) - throws NoSuchAlgorithmException { - return new String(Hex.encodeHex( - MessageDigest.getInstance("MD5").digest(s.getBytes()))); - } - - /** - * Get an MD5 hash of the RMap at the given URI.
- * - * @param uri URI of the RMap - * @return a hex String representation of the hash of the RMap contents - * @throws IOException if an exception occurs while reading the RMap - * @throws RMapException if the signature could not be generated - */ - public String getSignature(final URI uri) throws IOException, RMapException { - URI nonSymbolicURI; - try { - nonSymbolicURI = resolveSymbolicVersion(uri); - String encodedRMap = readRMapAsString(nonSymbolicURI); - return getSignature(encodedRMap); - } catch (URISyntaxException e) { - throw new RMapException("URI syntax invalid for RMap: " + uri, e); - } catch (NoSuchAlgorithmException e) { - throw new RMapException( - "Failed to generate signature for RMap: " + uri, e); - } - } - - /** - * Get the scheme, authority (if present) and path of a given URI as a string. - * @param uri - * @return a string containing just the scheme, authority and path - */ - public static String getSchemeAndPath(final URI uri) { - return String.format("%s:%s%s", uri.getScheme(), - uri.getAuthority() != null ? - String.format("//%s", uri.getAuthority()) : "", - uri.getPath()); - } - - /** - * Get a versioned URI for the RMap with given scheme, path and version. - * @param schemeAndPath - * @param version - * @return a URI of the form [scheme]:[authority]//[path]?version=[version] - * @throws URISyntaxException - */ - public static URI getVersionedURI(final String schemeAndPath, - final long version) throws URISyntaxException { - String token = "UNKNOWN"; - - if (version > 0) { - token = String.format("%d", version); - } else if (version == CURRENT) { - token = "CURRENT"; - } else if (version == NEXT) { - token = "NEXT"; - } - - return new URI(String.format("%s?version=%s", schemeAndPath, token)); - } - - /** - * Get a versioned URI for the RMap with given base URI and version. If the - * given URI already contains a version it is overwritten by the given - * version.
- * @param uri - * @param version - * @return a URI of the form [scheme]:[authority]//[path]?version=[version] - * @throws URISyntaxException - */ - public static URI getVersionedURI(final URI uri, final long version) - throws URISyntaxException { - return getVersionedURI(getSchemeAndPath(uri), version); - } - - public long getCurrentVersion(final String schemeAndPath) - throws URISyntaxException { - return getVersion(resolveSymbolicVersion( - getVersionedURI(schemeAndPath, CURRENT))); - } - - public long getNextVersion(final String schemeAndPath) - throws URISyntaxException { - return getVersion(resolveSymbolicVersion( - getVersionedURI(schemeAndPath, NEXT))); - } -}
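
Note: a worked example, not part of the patch, of the versioned-URI convention implemented above; the path is made up:

    // Symbolic version: "hdfs://cluster/rmap.json?version=CURRENT"
    URI current = RMapReader.getVersionedURI("hdfs://cluster/rmap.json", RMapReader.CURRENT);
    // Absolute version: "hdfs://cluster/rmap.json?version=42"
    URI v42 = RMapReader.getVersionedURI("hdfs://cluster/rmap.json", 42);
    // getVersion() maps the query parameter back to a number; CURRENT is -1, NEXT is -2.
    long version = RMapReader.getVersion(current);
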
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RegionLocator.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RegionLocator.java deleted file mode 100644 index 6dfaa57..0000000 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RegionLocator.java +++ /dev/null @@ -1,142 +0,0 @@ -package org.apache.hadoop.hbase.consensus.rmap; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HServerAddress; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.util.Bytes; -import org.json.JSONException; -import org.json.JSONObject; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.net.URI; -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentSkipListMap; - -public class RegionLocator { - private static final Logger LOG = LoggerFactory.getLogger( - RegionLocator.class); - - private Configuration conf; - - // regionInfoMap is a mapping from table name to region start key to - // HRegionInfo. This will be used in locateRegion and in turn in - // HConnection.locateRegion, so it needs to be thread-safe as the same - // HConnection can be used from multiple threads at the same time - ConcurrentHashMap<String, ConcurrentSkipListMap<byte[], HRegionInfo>> - regionInfoMap = new ConcurrentHashMap<>(); - - public RegionLocator(final Configuration conf) { - this.conf = conf; - } - - public HRegionInfo findRegion(byte[] tableName, byte[] row) { - ConcurrentSkipListMap<byte[], HRegionInfo> regions = - regionInfoMap.get(Bytes.toString(tableName)); - if (regions != null) { - Map.Entry<byte[], HRegionInfo> entry = regions.floorEntry(row); - if (entry != null) { - return entry.getValue(); - } - } - return null; - } - - public List<HTableDescriptor> getAllTables() { - List<HTableDescriptor> tables = new ArrayList<>(regionInfoMap.size()); - for (ConcurrentSkipListMap<byte[], HRegionInfo> regionMapForTable : - regionInfoMap.values()) { - if (regionMapForTable.size() > 0) { - tables.add(regionMapForTable.firstEntry().getValue().getTableDesc()); - } - } - return tables; - } - - public List<List<HRegionInfo>> getAllRegionsGroupByTable() { - List<List<HRegionInfo>> regions = new ArrayList<>(regionInfoMap.size()); - for (ConcurrentSkipListMap<byte[], HRegionInfo> regionMapForTable : - regionInfoMap.values()) { - regions.add(new ArrayList<>(regionMapForTable.values())); - } - return regions; - } - - /** - * Get all servers found in the regionInfo map. This method iterates over all - * HRegionInfo entries and thus might be expensive. - * - * @return a set containing all servers found in the region map - */ - public Set<HServerAddress> getAllServers() { - Set<HServerAddress> servers = new HashSet<>(); - for (ConcurrentSkipListMap<byte[], HRegionInfo> regionMapForTable : - regionInfoMap.values()) { - for (HRegionInfo region : regionMapForTable.values()) { - for (HServerAddress server : region.getPeersWithRank().keySet()) { - servers.add(server); - } - } - } - return servers; - } - - public List<HRegionInfo> getRegionsForTable(byte[] tableName) { - ConcurrentSkipListMap<byte[], HRegionInfo> regions = - regionInfoMap.get(Bytes.toString(tableName)); - if (regions != null) { - return new ArrayList<>(regions.values()); - } else { - return null; - } - } - - public List<HRegionInfo> getRegionsForServer(final HServerAddress address) { - List<HRegionInfo> regions = new ArrayList<>(); - for (ConcurrentSkipListMap<byte[], HRegionInfo> regionMapForTable : - regionInfoMap.values()) { - for (HRegionInfo region : regionMapForTable.values()) { - if (region.getPeersWithRank().containsKey(address)) { - regions.add(region); - } - } - } - return regions; - } - - private void updateRegionInfoMap(final List<HRegionInfo> regions) { - for (HRegionInfo region : regions) { - String tableName = region.getTableDesc().getNameAsString(); - ConcurrentSkipListMap<byte[], HRegionInfo> regionMapForTable - = regionInfoMap.get(tableName); - if (regionMapForTable == null) { - regionMapForTable = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); - regionInfoMap.put(tableName, regionMapForTable); - } - regionMapForTable.put(region.getStartKey(), region); - } - } - - public void refresh() throws IOException, RMapException { - Parser parser = new Parser(conf); - - URI uri = RMapConfiguration.getRMapSubscription(conf); - if (uri != null) { - RMapReader reader = RMapConfiguration.getRMapReader(conf, uri); - - try { - JSONObject encodedRMap = reader.readRMap(uri).getEncodedRMap(); - updateRegionInfoMap(parser.parseEncodedRMap(encodedRMap)); - } catch (JSONException e) { - throw new RMapException("Failed to decode JSON for RMap: " + uri, e); - } - } - } - - public boolean isEmpty() { - return regionInfoMap.isEmpty(); - } -}
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/server/LocalConsensusServer.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/server/LocalConsensusServer.java index 3de7ab8..777d917 100644 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/server/LocalConsensusServer.java +++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/server/LocalConsensusServer.java @@ -19,7 +19,6 @@ import org.apache.commons.cli.Options; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.consensus.client.QuorumClient; import org.apache.hadoop.hbase.consensus.quorum.AggregateTimer; @@ -321,12 +320,12 @@ } // Set the region with the peers - HRegionInfo regionInfo = RaftUtil.createDummyRegionInfo(regionId, peers); + QuorumInfo quorumInfo = RaftUtil.createDummyQuorumInfo(regionId, peers); // Create the RaftQuorumContext - RaftQuorumContext context = new RaftQuorumContext(regionInfo.getQuorumInfo(), + RaftQuorumContext context = new RaftQuorumContext(quorumInfo, configuration, localHost, - (regionInfo.getTableDesc().getNameAsString() + "."), + (regionId + "."), consensusServer.aggregateTimer, consensusServer.serialExecutorService, consensusServer.execServiceForThriftClients
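
Note: an illustrative sketch, not part of the patch, of the new bootstrap path used above; the peer addresses are made up. createDummyQuorumInfo() (see the RaftUtil change below) files the rank map under QuorumInfo.LOCAL_DC_KEY, so no HRegionInfo or HTableDescriptor is needed to stand up a quorum:

    Map<HServerAddress, Integer> peers = new HashMap<>();
    peers.put(new HServerAddress("10.0.0.1:60020"), 3);
    peers.put(new HServerAddress("10.0.0.2:60020"), 2);
    peers.put(new HServerAddress("10.0.0.3:60020"), 1);
    QuorumInfo quorumInfo = RaftUtil.createDummyQuorumInfo("testRegion", peers);
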
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/util/RaftUtil.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/util/RaftUtil.java index d73cb8e..d9e41b8 100644 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/util/RaftUtil.java +++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/consensus/util/RaftUtil.java @@ -9,10 +9,10 @@ import com.facebook.swift.service.ThriftClientManager; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableSet; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.consensus.quorum.AggregateTimer; +import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo; import org.apache.hadoop.hbase.consensus.quorum.RepeatingTimer; import org.apache.hadoop.hbase.consensus.quorum.TimeoutEventHandler; import org.apache.hadoop.hbase.consensus.quorum.Timer; @@ -51,19 +51,15 @@ } } - public static HRegionInfo createDummyRegionInfo(String region) { - return createDummyRegionInfo(region, null); + public static QuorumInfo createDummyQuorumInfo(String region) { + return createDummyQuorumInfo(region, null); } - public static HRegionInfo createDummyRegionInfo(String region, Map<HServerAddress, Integer> peers) { - HRegionInfo regionInfo = new HRegionInfo(new HTableDescriptor(region), - Bytes.toBytes("00000000"), Bytes.toBytes("ffffffff"), false, 1000, - null, null); + public static QuorumInfo createDummyQuorumInfo(String region, Map<HServerAddress, Integer> peers) { Map<String, Map<HServerAddress, Integer>> peerMap = new HashMap<>(); - peerMap.put(HRegionInfo.LOCAL_DC_KEY, peers); - regionInfo.setPeers(peerMap); - return regionInfo; + peerMap.put(QuorumInfo.LOCAL_DC_KEY, peers); + return new QuorumInfo(peerMap, region); } public static String listToString(List<?> list) {
diff --git a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractWAL.java b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractWAL.java index 2ae84d1..ea5b5c9 100644 --- a/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractWAL.java +++ b/hbase-consensus/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractWAL.java @@ -1,6 +1,5 @@ package org.apache.hadoop.hbase.regionserver.wal; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.regionserver.metrics.PercentileMetric; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Histogram; @@ -30,10 +29,11 @@ PercentileMetric.HISTOGRAM_MINVALUE_DEFAULT, PercentileMetric.HISTOGRAM_MAXVALUE_DEFAULT); + /* public abstract long append(HRegionInfo info, byte [] tableName, WALEdit edits, final long now) throws IOException, ExecutionException, InterruptedException; - + */ public abstract long startMemStoreFlush(final byte[] regionName); public abstract void completeMemStoreFlush(final byte[] regionName, final byte[] tableName, final long logSeqId, final boolean isMetaRegion);
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/LocalTestBed.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/LocalTestBed.java index 9f7a7c3..25b879d 100644 --- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/LocalTestBed.java +++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/LocalTestBed.java @@ -2,12 +2,12 @@ package org.apache.hadoop.hbase.consensus; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.consensus.exceptions.LeaderNotReadyException; import org.apache.hadoop.hbase.consensus.exceptions.NewLeaderException; import org.apache.hadoop.hbase.consensus.quorum.QuorumAgent; +import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo; import org.apache.hadoop.hbase.consensus.quorum.RaftQuorumContext; import org.apache.hadoop.hbase.consensus.server.InstrumentedConsensusServiceImpl; import org.apache.hadoop.hbase.consensus.server.LocalConsensusServer; @@ -37,7 +37,7 @@ public class LocalTestBed { private static int nextPortNumber; private Configuration conf; - private static HRegionInfo regionInfo; + private static QuorumInfo quorumInfo; private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil(); private final List<int[]> mockLogs; private boolean checkLeaderCount = false; @@ -53,14 +53,14 @@ public class LocalTestBed { public final long BigBang = System.currentTimeMillis(); public static class HealthChecker extends Thread { - private HRegionInfo regionInfo; + private QuorumInfo quorumInfo; private RaftTestUtil testUtil; private boolean autoRestartThriftServer; private long checkInterval; private AtomicBoolean time2die = new AtomicBoolean(false); - public HealthChecker(HRegionInfo regionInfo, RaftTestUtil testUtil, boolean autoRestartThriftServer, long checkInterval) { - this.regionInfo = regionInfo; + public HealthChecker(QuorumInfo quorumInfo, RaftTestUtil testUtil, boolean autoRestartThriftServer, long checkInterval) { + this.quorumInfo = quorumInfo; this.testUtil = testUtil; this.autoRestartThriftServer = autoRestartThriftServer; this.checkInterval = checkInterval; @@ -74,7 +74,7 @@ public class LocalTestBed { long now = System.currentTimeMillis(); if (now >= previousCheckTime + checkInterval) { LOG.info("checking the health of all quorum members ......"); - testUtil.checkHealth(regionInfo, autoRestartThriftServer); + testUtil.checkHealth(quorumInfo, autoRestartThriftServer); previousCheckTime = now = System.currentTimeMillis(); } long sleepTime = previousCheckTime + checkInterval - now; @@ -219,9 +219,9 @@ Set currentHiccups = new HashSet(); Set allHiccups = new HashSet(); // include future hiccups Set nohiccups = new HashSet(); - for (HServerAddress dst : regionInfo.getPeersWithRank().keySet()) { + for (HServerAddress dst : quorumInfo.getPeersWithRank().keySet()) { dst = RaftUtil.getLocalConsensusAddress(dst); - for (HServerAddress src : regionInfo.getPeersWithRank().keySet()) { + for (HServerAddress src : quorumInfo.getPeersWithRank().keySet()) { src = RaftUtil.getLocalConsensusAddress(src); if (src.equals(dst)) { continue;
adversary.getChaos().updateNetworkStates(System.currentTimeMillis(), true); LOG.info("-------- Verifying log consistency amongst all quorum members"); - while (!RAFT_TEST_UTIL.verifyLogs(regionInfo.getQuorumInfo(), QUORUM_SIZE, true)) { + while (!RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_SIZE, true)) { testbed.dumpStates(); if (testbed.checkLeaderCount()) { Assert.assertTrue(testbed.getLeaderCount() < 2); @@ -732,10 +732,10 @@ public class LocalTestBed { RAFT_TEST_UTIL.setUsePeristentLog(usePersistentLog); RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE); RAFT_TEST_UTIL.assertAllServersRunning(); - regionInfo = RAFT_TEST_UTIL.initializePeers(); - RAFT_TEST_UTIL.addQuorum(regionInfo, mockLogs); - RAFT_TEST_UTIL.startQuorum(regionInfo); - checker = new HealthChecker(regionInfo, RAFT_TEST_UTIL, autoRestartThriftServer, 30000L); + quorumInfo = RAFT_TEST_UTIL.initializePeers(); + RAFT_TEST_UTIL.addQuorum(quorumInfo, mockLogs); + RAFT_TEST_UTIL.startQuorum(quorumInfo); + checker = new HealthChecker(quorumInfo, RAFT_TEST_UTIL, autoRestartThriftServer, 30000L); checker.start(); } @@ -766,7 +766,7 @@ public class LocalTestBed { } public void dumpStates() { - RAFT_TEST_UTIL.dumpStates(regionInfo); + RAFT_TEST_UTIL.dumpStates(quorumInfo); LOG.info("Total Commit = " + commitSuccessCount.get()+ " successes and " + commitFailureCount.get() + " failures " + " with " + getPacketDropCount() + "(" + getHiccupPacketDropCount() + ") total-dropped (hiccup) packets " + " and " + RAFT_TEST_UTIL.getServerRestartCount() + " server restarts " @@ -801,11 +801,11 @@ public class LocalTestBed { } agent.syncAppend(edit); // Verify all the logs across the majority are the same - RAFT_TEST_UTIL.verifyLogs(regionInfo.getQuorumInfo(), QUORUM_MAJORITY, false); + RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_MAJORITY, false); return true; } catch (NewLeaderException e) { LOG.warn("Got a new leader in the quorum: " + e.getNewLeaderAddress()); - RAFT_TEST_UTIL.verifyLogs(regionInfo.getQuorumInfo(), QUORUM_MAJORITY, false); + RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_MAJORITY, false); } catch (Exception e) { Throwable cause = e; while (cause != null) { @@ -838,7 +838,7 @@ public class LocalTestBed { do { int leaderCnt = 0; for (LocalConsensusServer server : RAFT_TEST_UTIL.getServers().values()) { - RaftQuorumContext c = server.getHandler().getRaftQuorumContext(regionInfo.getEncodedName()); + RaftQuorumContext c = server.getHandler().getRaftQuorumContext(quorumInfo.getQuorumName()); if (c != null && c.isLeader()) { leaderQuorum = c; leaderCnt++; @@ -854,7 +854,7 @@ public class LocalTestBed { public int getLeaderCount() { int leaderCnt = 0; for (LocalConsensusServer server : RAFT_TEST_UTIL.getServers().values()) { - RaftQuorumContext c = server.getHandler().getRaftQuorumContext(regionInfo.getEncodedName()); + RaftQuorumContext c = server.getHandler().getRaftQuorumContext(quorumInfo.getQuorumName()); if (c != null && c.isLeader()) { leaderCnt++; } @@ -862,8 +862,6 @@ public class LocalTestBed { return leaderCnt; } - - public static void noThrowSleep(long ms) { try { Thread.sleep(ms); @@ -916,11 +914,11 @@ public class LocalTestBed { } public long getPacketDropCount() { - return RAFT_TEST_UTIL.getPacketDropCount(regionInfo); + return RAFT_TEST_UTIL.getPacketDropCount(quorumInfo); } public long getHiccupPacketDropCount() { - return RAFT_TEST_UTIL.getHiccupPacketDropCount(regionInfo); + return RAFT_TEST_UTIL.getHiccupPacketDropCount(quorumInfo); } // shoplifted from 
http://preshing.com/20111007/how-to-generate-random-timings-for-a-poisson-process/
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/RaftTestUtil.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/RaftTestUtil.java index d259308..a58afdd 100644 --- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/RaftTestUtil.java +++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/RaftTestUtil.java @@ -7,7 +7,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; @@ -181,9 +180,10 @@ public class RaftTestUtil { /** * Creates a new quorum from the given quorum info object. - * @param regionInfo + * @param quorumInfo The quorum info. + * @param mockLogs Mock logs for the quorum members. */ - public void addQuorum(final HRegionInfo regionInfo, List<int[]> mockLogs) + public void addQuorum(final QuorumInfo quorumInfo, List<int[]> mockLogs) throws IOException { int i = 0; int[] mockLog; @@ -192,7 +192,7 @@ if (mockLogs != null) { mockLog = mockLogs.get(i++); } - addQuorumForServer(server, regionInfo.getQuorumInfo(), mockLog); + addQuorumForServer(server, quorumInfo, mockLog); } } @@ -236,12 +236,12 @@ return serverRestartCount.get(); } - public void checkHealth(HRegionInfo regionInfo, boolean reset) { + public void checkHealth(QuorumInfo quorumInfo, boolean reset) { for (LocalConsensusServer server : servers.values()) { boolean healthy = false; LOG.info("Checking the health of ThriftServer for " + server + " ......"); try { - healthy = checkHealth(regionInfo, server); + healthy = checkHealth(quorumInfo, server); } catch (Exception ex) { LOG.error("Failed to check the status for " + server, ex); } @@ -258,7 +258,7 @@ } } - public boolean checkHealth(HRegionInfo regionInfo, LocalConsensusServer server) throws Exception { + public boolean checkHealth(QuorumInfo quorumInfo, LocalConsensusServer server) throws Exception { HServerAddress consensusServerAddress = new HServerAddress(LOCAL_HOST, server.getThriftServer().getPort()); int timeout = 5000; @@ -266,13 +266,13 @@ QuorumThriftClientAgent agent = new QuorumThriftClientAgent( consensusServerAddress.toString(), timeout, timeout, timeout, 3); LOG.info("QuorumThriftClientAgent for " + consensusServerAddress + " = " + agent); - PeerStatus status = agent.getPeerStatus(regionInfo.getEncodedName()); + PeerStatus status = agent.getPeerStatus(quorumInfo.getQuorumName()); LOG.info("PeerStatus for " + consensusServerAddress + ": " + status); return status != null; } public RaftQuorumContext restartLocalConsensusServer(LocalConsensusServer server, - final HRegionInfo regionInfo, + final QuorumInfo quorumInfo, final String contextAddress) throws IOException { @@ -291,8 +291,8 @@ conf.setInt( HConstants.RAFT_LOG_DELETION_INTERVAL_KEY, 100); - RaftQuorumContext context = createRaftQuorumContext( - regionInfo.getQuorumInfo(), conf, consensusServerAddress, server); + RaftQuorumContext context = createRaftQuorumContext(quorumInfo, conf, + consensusServerAddress, server); context.initializeLog(); context.reseedStartIndex(seedIndex); context.startStateMachines();
@@ -300,11 +300,11 @@ return context; } - public void startQuorum(final HRegionInfo regionInfo) + public void startQuorum(final QuorumInfo quorumInfo) throws IOException { for (LocalConsensusServer server : servers.values()) { RaftQuorumContext context = - server.getHandler().getRaftQuorumContext(regionInfo.getEncodedName()); + server.getHandler().getRaftQuorumContext(quorumInfo.getQuorumName()); context.initializeLog(); context.reseedStartIndex(seedIndex); context.startStateMachines(); @@ -436,13 +436,13 @@ } } - public void dumpStates(final HRegionInfo info) { + public void dumpStates(final QuorumInfo info) { LOG.info("---- logs for region " + info + ":"); List<String> logs = new ArrayList<>(); List<Integer> ports = new ArrayList<>(); for (LocalConsensusServer server : servers.values()) { final CommitLogManagerInterface log = - server.getHandler().getRaftQuorumContext(info.getEncodedName()).getLogManager(); + server.getHandler().getRaftQuorumContext(info.getQuorumName()).getLogManager(); logs.add(log.dumpLogs(-1)); ports.add(server.getThriftServer().getPort()); } @@ -530,7 +530,7 @@ } } - public HRegionInfo initializePeers() { + public QuorumInfo initializePeers() { Map<HServerAddress, Integer> peers = new HashMap<>(); int rank = servers.size(); for (LocalConsensusServer server : servers.values()) { @@ -545,19 +545,17 @@ HColumnDescriptor hcd = new HColumnDescriptor(FAMILY).setMaxVersions(Integer.MAX_VALUE); table.addFamily(hcd); - HRegionInfo regionInfo = new HRegionInfo(table, Bytes.toBytes("00000000"), - Bytes.toBytes("ffffffff")); Map<String, Map<HServerAddress, Integer>> peerMap = new HashMap<>(); - peerMap.put(HRegionInfo.LOCAL_DC_KEY, peers); - regionInfo.setPeers(peerMap); + peerMap.put(QuorumInfo.LOCAL_DC_KEY, peers); - return regionInfo; + String quorumName = "dummyTable,123,deadbeef."; + return new QuorumInfo(peerMap, quorumName); } - public HRegionInfo resetPeers(HRegionInfo regionInfo, List<int[]> logs) throws Exception { + public QuorumInfo resetPeers(QuorumInfo quorumInfo, List<int[]> logs) throws Exception { - addQuorum(regionInfo, logs); - return regionInfo; + addQuorum(quorumInfo, logs); + return quorumInfo; } public void setSeedIndex(long seedIndex) { @@ -623,12 +621,12 @@ return 1; } - public boolean simulatePacketDropForServer(final HRegionInfo regionInfo, int rank, + public boolean simulatePacketDropForServer(final QuorumInfo quorumInfo, int rank, final InstrumentedConsensusServiceImpl.PacketDropStyle style) { HServerAddress server = null; - for (HServerAddress s : regionInfo.getPeersWithRank().keySet()) { - if (regionInfo.getPeersWithRank().get(s) == rank) { + for (HServerAddress s : quorumInfo.getPeersWithRank().keySet()) { + if (quorumInfo.getPeersWithRank().get(s) == rank) { server = s; break; } @@ -645,9 +643,9 @@ return true; } - public long getHiccupPacketDropCount(final HRegionInfo regionInfo) { + public long getHiccupPacketDropCount(final QuorumInfo quorumInfo) { long count = 0; - for (HServerAddress server : regionInfo.getPeersWithRank().keySet()) { + for (HServerAddress server : quorumInfo.getPeersWithRank().keySet()) { InstrumentedConsensusServiceImpl service = (InstrumentedConsensusServiceImpl) (servers.get(RaftUtil.getLocalConsensusAddress(server).getHostAddressWithPort()).getHandler()); count += service.getHiccupPacketDropCount(); @@ -655,9 +653,9 @@ return count; } - public long getPacketDropCount(final HRegionInfo regionInfo) { + public long
getPacketDropCount(final QuorumInfo quorumInfo) { long count = 0; - for (HServerAddress server : regionInfo.getPeersWithRank().keySet()) { + for (HServerAddress server : quorumInfo.getPeersWithRank().keySet()) { InstrumentedConsensusServiceImpl service = (InstrumentedConsensusServiceImpl) (servers.get(RaftUtil.getLocalConsensusAddress(server).getHostAddressWithPort()).getHandler()); count += service.getPacketDropCount(); @@ -669,28 +667,28 @@ this.usePersistentLog = usePersistentLog; } - public RaftQuorumContext getRaftQuorumContextByAddress(HRegionInfo regionInfo, + public RaftQuorumContext getRaftQuorumContextByAddress(QuorumInfo quorumInfo, String address) { return getServers().get(address).getHandler() - .getRaftQuorumContext(regionInfo.getEncodedName()); + .getRaftQuorumContext(quorumInfo.getQuorumName()); } - public RaftQuorumContext getRaftQuorumContextByRank(HRegionInfo regionInfo, int rank) { + public RaftQuorumContext getRaftQuorumContextByRank(QuorumInfo quorumInfo, int rank) { String peerAddress = null; - for (HServerAddress addr : regionInfo.getPeersWithRank().keySet()) { - if (regionInfo.getPeersWithRank().get(addr) == rank) { + for (HServerAddress addr : quorumInfo.getPeersWithRank().keySet()) { + if (quorumInfo.getPeersWithRank().get(addr) == rank) { peerAddress = RaftUtil.getLocalConsensusAddress(addr).getHostAddressWithPort(); } } return getServers().get(peerAddress).getHandler(). - getRaftQuorumContext(regionInfo.getEncodedName()); + getRaftQuorumContext(quorumInfo.getQuorumName()); } - public LocalConsensusServer stopLocalConsensusServer(HRegionInfo regionInfo, int rank) { + public LocalConsensusServer stopLocalConsensusServer(QuorumInfo quorumInfo, int rank) { String peerAddress = null; - for (HServerAddress addr : regionInfo.getPeersWithRank().keySet()) { - if (regionInfo.getPeersWithRank().get(addr) == rank) { + for (HServerAddress addr : quorumInfo.getPeersWithRank().keySet()) { + if (quorumInfo.getPeersWithRank().get(addr) == rank) { peerAddress = RaftUtil.getLocalConsensusAddress(addr).getHostAddressWithPort(); } } @@ -705,19 +703,19 @@ return server; } - public void printStatusOfQuorum(HRegionInfo regionInfo) { + public void printStatusOfQuorum(QuorumInfo quorumInfo) { System.out.println(" ======= Status Update ========="); for (LocalConsensusServer server : getServers().values()) { RaftQuorumContext context = - server.getHandler().getRaftQuorumContext(regionInfo.getEncodedName()); + server.getHandler().getRaftQuorumContext(quorumInfo.getQuorumName()); System.out.println(context + " ; " + context.getPaxosState() + " ; " + context.getLogState()); } System.out.println(" ================"); } public List<RaftQuorumContext> getQuorumContexts( - final HRegionInfo regionInfo) { - Set<HServerAddress> replias = regionInfo.getPeersWithRank().keySet(); + final QuorumInfo quorumInfo) { + Set<HServerAddress> replias = quorumInfo.getPeersWithRank().keySet(); List<RaftQuorumContext> contexts = new ArrayList<>(replias.size()); for (HServerAddress address : replias) { @@ -725,16 +723,16 @@ RaftUtil.getLocalConsensusAddress(address).
getHostAddressWithPort(); if (getServers().containsKey(consensusServerAddress)) { - contexts.add(getRaftQuorumContextByAddress(regionInfo, + contexts.add(getRaftQuorumContextByAddress(quorumInfo, consensusServerAddress)); } } return contexts; } - public RaftQuorumContext getLeaderQuorumContext(HRegionInfo regionInfo) { + public RaftQuorumContext getLeaderQuorumContext(QuorumInfo quorumInfo) { RaftQuorumContext leader = null; - for (RaftQuorumContext context : getQuorumContexts(regionInfo)) { + for (RaftQuorumContext context : getQuorumContexts(quorumInfo)) { if (context.isLeader()) { leader = context; } @@ -742,9 +740,9 @@ public class RaftTestUtil { return leader; } - public void waitForLeader(final HRegionInfo regionInfo) + public void waitForLeader(final QuorumInfo quorumInfo) throws InterruptedException { - while (getLeaderQuorumContext(regionInfo) == null) { + while (getLeaderQuorumContext(quorumInfo) == null) { Thread.sleep(500); } } diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/ReplicationLoadForUnitTest.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/ReplicationLoadForUnitTest.java index e4cf944..4257c0d 100644 --- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/ReplicationLoadForUnitTest.java +++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/ReplicationLoadForUnitTest.java @@ -1,7 +1,7 @@ package org.apache.hadoop.hbase.consensus; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.consensus.client.QuorumClient; +import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; @@ -14,16 +14,16 @@ public class ReplicationLoadForUnitTest { private volatile boolean stop = false; private RaftTestUtil util; - private HRegionInfo regionInfo; + private QuorumInfo quorumInfo; private QuorumClient client; private int quorumSize = 5; private int majoritySize = 3; private volatile long sleepTime = 50; - public ReplicationLoadForUnitTest(HRegionInfo regionInfo, QuorumClient client, + public ReplicationLoadForUnitTest(QuorumInfo quorumInfo, QuorumClient client, RaftTestUtil util, int quorumSize, int majoritySize) { - this.regionInfo = regionInfo; + this.quorumInfo = quorumInfo; this.client = client; this.util = util; this.quorumSize = quorumSize; @@ -34,11 +34,11 @@ public class ReplicationLoadForUnitTest { public int makeProgress(long sleepTime, int prevLoad) throws InterruptedException { System.out.println("Let the client load fly for " + sleepTime + " ms"); Thread.sleep(sleepTime); - util.printStatusOfQuorum(regionInfo); + util.printStatusOfQuorum(quorumInfo); while (transactionNums <= prevLoad) { System.out.println("No Progress ! 
prev " + prevLoad + " current " + transactionNums); - util.printStatusOfQuorum(regionInfo); + util.printStatusOfQuorum(quorumInfo); Thread.sleep(sleepTime); } @@ -59,7 +59,7 @@ public class ReplicationLoadForUnitTest { client.replicateCommits(RaftTestUtil.generateTransaction(1 * 1024)); if ((++transactionNums) % progressInterval == 0) { System.out.println("Sent " + transactionNums + " transactions to the quorum"); - util.printStatusOfQuorum(regionInfo); + util.printStatusOfQuorum(quorumInfo); } } catch (Exception e) { diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicCommit.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicCommit.java index b54c2a6..96e9129 100644 --- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicCommit.java +++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicCommit.java @@ -1,23 +1,21 @@ package org.apache.hadoop.hbase.consensus; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.consensus.client.QuorumClient; +import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo; import org.apache.hadoop.hbase.consensus.quorum.RaftQuorumContext; import org.apache.hadoop.hbase.consensus.server.LocalConsensusServer; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.IOException; public class TestBasicCommit { private static int QUORUM_SIZE = 3; private static int QUORUM_MAJORITY = 2; - private static HRegionInfo regionInfo; + private static QuorumInfo quorumInfo; private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil(); private static QuorumClient client; private static volatile int transactionNum = 0; @@ -31,12 +29,12 @@ public class TestBasicCommit { RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE); RAFT_TEST_UTIL.setUsePeristentLog(true); RAFT_TEST_UTIL.assertAllServersRunning(); - regionInfo = RAFT_TEST_UTIL.initializePeers(); - RAFT_TEST_UTIL.addQuorum(regionInfo, RAFT_TEST_UTIL.getScratchSetup(QUORUM_SIZE)); - RAFT_TEST_UTIL.startQuorum(regionInfo); - client = RAFT_TEST_UTIL.getQuorumClient(regionInfo.getQuorumInfo()); + quorumInfo = RAFT_TEST_UTIL.initializePeers(); + RAFT_TEST_UTIL.addQuorum(quorumInfo, RAFT_TEST_UTIL.getScratchSetup(QUORUM_SIZE)); + RAFT_TEST_UTIL.startQuorum(quorumInfo); + client = RAFT_TEST_UTIL.getQuorumClient(quorumInfo); transactionNum = 0; - loader = new ReplicationLoadForUnitTest(regionInfo, client, RAFT_TEST_UTIL, QUORUM_SIZE, + loader = new ReplicationLoadForUnitTest(quorumInfo, client, RAFT_TEST_UTIL, QUORUM_SIZE, QUORUM_MAJORITY); } @@ -59,17 +57,17 @@ public class TestBasicCommit { transactionNum = loader.makeProgress(sleepTime, transactionNum); // Get all the quorum contexts from rank 3 to rank 1 - RaftQuorumContext c3 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 3); - RaftQuorumContext c2 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 2); - RaftQuorumContext c1 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 1); + RaftQuorumContext c3 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 3); + RaftQuorumContext c2 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 2); + RaftQuorumContext c1 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 1); // Shutdown 1st quorum member whose rank is 1. 
System.out.println("Stopping one quorum member: " + c1); - LocalConsensusServer s1 = RAFT_TEST_UTIL.stopLocalConsensusServer(regionInfo, 1); + LocalConsensusServer s1 = RAFT_TEST_UTIL.stopLocalConsensusServer(quorumInfo, 1); // Shutdown 2nd quorum member whose rank 2 System.out.println("Stopping another quorum member: " + c2); - LocalConsensusServer s2 = RAFT_TEST_UTIL.stopLocalConsensusServer(regionInfo, 2); + LocalConsensusServer s2 = RAFT_TEST_UTIL.stopLocalConsensusServer(quorumInfo, 2); // Sleep for some time to make sure the leader is stuck in retry @@ -77,18 +75,18 @@ public class TestBasicCommit { // Shutdown 3rd quorum member whose rank 3 System.out.println("Stopping another quorum member: " + c3); - LocalConsensusServer s3 = RAFT_TEST_UTIL.stopLocalConsensusServer(regionInfo, 3); + LocalConsensusServer s3 = RAFT_TEST_UTIL.stopLocalConsensusServer(quorumInfo, 3); // Restart 3 - c3 = RAFT_TEST_UTIL.restartLocalConsensusServer(s3, regionInfo, c3.getMyAddress()); + c3 = RAFT_TEST_UTIL.restartLocalConsensusServer(s3, quorumInfo, c3.getMyAddress()); System.out.println("Restarted one quorum member: " + c3); // Restart 2 - c2 = RAFT_TEST_UTIL.restartLocalConsensusServer(s2, regionInfo, c2.getMyAddress()); + c2 = RAFT_TEST_UTIL.restartLocalConsensusServer(s2, quorumInfo, c2.getMyAddress()); System.out.println("Restarted one quorum member: " + c2); // Restart 1 - c1 = RAFT_TEST_UTIL.restartLocalConsensusServer(s1, regionInfo, c1.getMyAddress()); + c1 = RAFT_TEST_UTIL.restartLocalConsensusServer(s1, quorumInfo, c1.getMyAddress()); System.out.println("Restarted one quorum member: " + c1); // Let the traffic fly for a while @@ -106,7 +104,7 @@ public class TestBasicCommit { loader.slowDownReplicationLoad(); // Verify logs are identical across all the quorum members - while (!RAFT_TEST_UTIL.verifyLogs(regionInfo.getQuorumInfo(), QUORUM_SIZE)) { + while (!RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_SIZE)) { Thread.sleep(10 * 1000); System.out.println("Verifying logs ...."); Assert.assertTrue("Rank 3 shall be the leader of the quorum", c3.isLeader()); diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicLeaderElection.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicLeaderElection.java index de31eea..352c4f2 100644 --- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicLeaderElection.java +++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicLeaderElection.java @@ -1,7 +1,7 @@ package org.apache.hadoop.hbase.consensus; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo; import org.apache.hadoop.hbase.consensus.quorum.RaftQuorumContext; import org.apache.hadoop.hbase.consensus.server.LocalConsensusServer; import org.junit.After; @@ -19,7 +19,7 @@ import static junit.framework.Assert.assertEquals; public class TestBasicLeaderElection { private static int QUORUM_SIZE = 5; - private static HRegionInfo regionInfo; + private static QuorumInfo quorumInfo; private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil(); private final List mockLogs; @@ -28,9 +28,9 @@ public class TestBasicLeaderElection { RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE); RAFT_TEST_UTIL.setUsePeristentLog(true); RAFT_TEST_UTIL.assertAllServersRunning(); - regionInfo = RAFT_TEST_UTIL.initializePeers(); - RAFT_TEST_UTIL.addQuorum(regionInfo, mockLogs); - RAFT_TEST_UTIL.startQuorum(regionInfo); + quorumInfo = 
RAFT_TEST_UTIL.initializePeers(); + RAFT_TEST_UTIL.addQuorum(quorumInfo, mockLogs); + RAFT_TEST_UTIL.startQuorum(quorumInfo); } @After @@ -62,13 +62,14 @@ public class TestBasicLeaderElection { } catch (InterruptedException e) {} for (LocalConsensusServer server : RAFT_TEST_UTIL.getServers().values()) { - RaftQuorumContext c = server.getHandler().getRaftQuorumContext(regionInfo.getEncodedName()); + RaftQuorumContext c = server.getHandler().getRaftQuorumContext( + quorumInfo.getQuorumName()); if (c.isLeader()) { leaderCnt++; } } } - while(!RAFT_TEST_UTIL.verifyLogs(regionInfo.getQuorumInfo(), QUORUM_SIZE) && leaderCnt != 1); + while(!RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_SIZE) && leaderCnt != 1); assertEquals("There should be only one leader", 1, leaderCnt); } } diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerFailure.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerFailure.java index ceab1f5..30658d1 100644 --- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerFailure.java +++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerFailure.java @@ -1,8 +1,8 @@ package org.apache.hadoop.hbase.consensus; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.consensus.client.QuorumClient; +import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo; import org.apache.hadoop.hbase.consensus.quorum.RaftQuorumContext; import org.apache.hadoop.hbase.consensus.server.LocalConsensusServer; import org.junit.After; @@ -19,7 +19,7 @@ public class TestBasicPeerFailure { TestBasicPeerFailure.class); private static int QUORUM_SIZE = 5; private static int QUORUM_MAJORITY = 3; - private static HRegionInfo regionInfo; + private static QuorumInfo quorumInfo; private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil(); private static QuorumClient client; private static volatile int transactionNum = 0; @@ -31,12 +31,12 @@ public class TestBasicPeerFailure { RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE); RAFT_TEST_UTIL.setUsePeristentLog(true); RAFT_TEST_UTIL.assertAllServersRunning(); - regionInfo = RAFT_TEST_UTIL.initializePeers(); - RAFT_TEST_UTIL.addQuorum(regionInfo, RAFT_TEST_UTIL.getScratchSetup(QUORUM_SIZE)); - RAFT_TEST_UTIL.startQuorum(regionInfo); - client = RAFT_TEST_UTIL.getQuorumClient(regionInfo.getQuorumInfo()); + quorumInfo = RAFT_TEST_UTIL.initializePeers(); + RAFT_TEST_UTIL.addQuorum(quorumInfo, RAFT_TEST_UTIL.getScratchSetup(QUORUM_SIZE)); + RAFT_TEST_UTIL.startQuorum(quorumInfo); + client = RAFT_TEST_UTIL.getQuorumClient(quorumInfo); transactionNum = 0; - loader = new ReplicationLoadForUnitTest(regionInfo, client, RAFT_TEST_UTIL, QUORUM_SIZE, + loader = new ReplicationLoadForUnitTest(quorumInfo, client, RAFT_TEST_UTIL, QUORUM_SIZE, QUORUM_MAJORITY); } @@ -61,7 +61,7 @@ public class TestBasicPeerFailure { public void testStepDownOnNoProgress() throws InterruptedException { final long sleepTime = 2 * HConstants.PROGRESS_TIMEOUT_INTERVAL_IN_MILLISECONDS; - RaftQuorumContext c5 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 5); + RaftQuorumContext c5 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 5); // Start the client load loader.startReplicationLoad(100); @@ -73,7 +73,7 @@ public class TestBasicPeerFailure { // Stop the majority of replicas for (int i = 0; i < QUORUM_MAJORITY; i++) { System.out.println("Stopping replica with rank " + (i + 1)); - 
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerFailure.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerFailure.java
index ceab1f5..30658d1 100644
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerFailure.java
+++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerFailure.java
@@ -1,8 +1,8 @@
 package org.apache.hadoop.hbase.consensus;
 
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.consensus.client.QuorumClient;
+import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo;
 import org.apache.hadoop.hbase.consensus.quorum.RaftQuorumContext;
 import org.apache.hadoop.hbase.consensus.server.LocalConsensusServer;
 import org.junit.After;
@@ -19,7 +19,7 @@ public class TestBasicPeerFailure {
       TestBasicPeerFailure.class);
   private static int QUORUM_SIZE = 5;
   private static int QUORUM_MAJORITY = 3;
-  private static HRegionInfo regionInfo;
+  private static QuorumInfo quorumInfo;
   private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil();
   private static QuorumClient client;
   private static volatile int transactionNum = 0;
@@ -31,12 +31,12 @@ public class TestBasicPeerFailure {
     RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE);
     RAFT_TEST_UTIL.setUsePeristentLog(true);
     RAFT_TEST_UTIL.assertAllServersRunning();
-    regionInfo = RAFT_TEST_UTIL.initializePeers();
-    RAFT_TEST_UTIL.addQuorum(regionInfo, RAFT_TEST_UTIL.getScratchSetup(QUORUM_SIZE));
-    RAFT_TEST_UTIL.startQuorum(regionInfo);
-    client = RAFT_TEST_UTIL.getQuorumClient(regionInfo.getQuorumInfo());
+    quorumInfo = RAFT_TEST_UTIL.initializePeers();
+    RAFT_TEST_UTIL.addQuorum(quorumInfo, RAFT_TEST_UTIL.getScratchSetup(QUORUM_SIZE));
+    RAFT_TEST_UTIL.startQuorum(quorumInfo);
+    client = RAFT_TEST_UTIL.getQuorumClient(quorumInfo);
     transactionNum = 0;
-    loader = new ReplicationLoadForUnitTest(regionInfo, client, RAFT_TEST_UTIL, QUORUM_SIZE,
+    loader = new ReplicationLoadForUnitTest(quorumInfo, client, RAFT_TEST_UTIL, QUORUM_SIZE,
       QUORUM_MAJORITY);
   }
 
@@ -61,7 +61,7 @@ public class TestBasicPeerFailure {
   public void testStepDownOnNoProgress() throws InterruptedException {
     final long sleepTime =
       2 * HConstants.PROGRESS_TIMEOUT_INTERVAL_IN_MILLISECONDS;
-    RaftQuorumContext c5 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 5);
+    RaftQuorumContext c5 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 5);
 
     // Start the client load
     loader.startReplicationLoad(100);
@@ -73,7 +73,7 @@ public class TestBasicPeerFailure {
     // Stop the majority of replicas
     for (int i = 0; i < QUORUM_MAJORITY; i++) {
       System.out.println("Stopping replica with rank " + (i + 1));
-      RAFT_TEST_UTIL.stopLocalConsensusServer(regionInfo, i + 1);
+      RAFT_TEST_UTIL.stopLocalConsensusServer(quorumInfo, i + 1);
     }
     Thread.sleep(2 * HConstants.PROGRESS_TIMEOUT_INTERVAL_IN_MILLISECONDS);
@@ -99,13 +99,13 @@ public class TestBasicPeerFailure {
     transactionNum = loader.makeProgress(sleepTime, transactionNum);
 
     // Get all the quorum contexts from rank 5 to rank 3
-    RaftQuorumContext c5 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 5);
-    RaftQuorumContext c4 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 4);
-    RaftQuorumContext c3 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 3);
+    RaftQuorumContext c5 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 5);
+    RaftQuorumContext c4 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 4);
+    RaftQuorumContext c3 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 3);
 
     // Shutdown 1st quorum member whose rank is 5.
     System.out.println("Stopping one quorum member: " + c5);
-    LocalConsensusServer s5 = RAFT_TEST_UTIL.stopLocalConsensusServer(regionInfo, 5);
+    LocalConsensusServer s5 = RAFT_TEST_UTIL.stopLocalConsensusServer(quorumInfo, 5);
 
     // Let the traffic fly for a while
     if ((++failureCnt % failureInterval) == 0) {
@@ -115,7 +115,7 @@ public class TestBasicPeerFailure {
 
     // Shutdown 2nd quorum member whose rank 4
     System.out.println("Stopping another quorum member: " + c4);
-    LocalConsensusServer s4 = RAFT_TEST_UTIL.stopLocalConsensusServer(regionInfo, 4);
+    LocalConsensusServer s4 = RAFT_TEST_UTIL.stopLocalConsensusServer(quorumInfo, 4);
 
     // Let the traffic fly for a while
     if ((++failureCnt % failureInterval) == 0) {
@@ -124,7 +124,7 @@ public class TestBasicPeerFailure {
     }
 
     // Restart the quorum member whose rank is 4
-    c4 = RAFT_TEST_UTIL.restartLocalConsensusServer(s4, regionInfo, c4.getMyAddress());
+    c4 = RAFT_TEST_UTIL.restartLocalConsensusServer(s4, quorumInfo, c4.getMyAddress());
     System.out.println("Restarted one quorum member: " + c4);
 
     // Let the traffic fly for a while
@@ -139,7 +139,7 @@ public class TestBasicPeerFailure {
       transactionNum = loader.makeProgress(sleepTime, transactionNum);
     }
     // Restart the quorum member whose rank is 5
-    c5 = RAFT_TEST_UTIL.restartLocalConsensusServer(s5, regionInfo, c5.getMyAddress());
+    c5 = RAFT_TEST_UTIL.restartLocalConsensusServer(s5, quorumInfo, c5.getMyAddress());
     System.out.println("Restarted another quorum member: " + c5);
 
     // Let the traffic fly for a while
@@ -156,7 +156,7 @@ public class TestBasicPeerFailure {
     loader.slowDownReplicationLoad();
 
     // Verify logs are identical across all the quorum members
-    while (!RAFT_TEST_UTIL.verifyLogs(regionInfo.getQuorumInfo(), QUORUM_SIZE)) {
+    while (!RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_SIZE)) {
       Thread.sleep(10 * 1000);
       System.out.println("Verifying logs ....");
       Assert.assertTrue("Rank 5 shall be the leader of the quorum", c5.isLeader());
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerSeeding.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerSeeding.java
index 02dc112..b2080f3 100644
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerSeeding.java
+++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerSeeding.java
@@ -7,9 +7,9 @@ import java.util.Collection;
 import java.util.List;
 
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.consensus.client.QuorumClient;
+import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
@@ -28,7 +28,7 @@ public class TestBasicPeerSeeding {
 
   private static final int QUORUM_SIZE = 5;
   private static final int QUORUM_MAJORITY = 3;
-  private static HRegionInfo regionInfo;
+  private static QuorumInfo quorumInfo;
   private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil();
   private final List mockLogs;
   private final long seedIndex = 4;
@@ -39,11 +39,11 @@ public class TestBasicPeerSeeding {
     RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE);
     RAFT_TEST_UTIL.assertAllServersRunning();
     RAFT_TEST_UTIL.setUsePeristentLog(true);
-    regionInfo = RAFT_TEST_UTIL.initializePeers();
-    RAFT_TEST_UTIL.addQuorum(regionInfo, mockLogs);
+    quorumInfo = RAFT_TEST_UTIL.initializePeers();
+    RAFT_TEST_UTIL.addQuorum(quorumInfo, mockLogs);
     RAFT_TEST_UTIL.setSeedIndex(seedIndex);
-    RAFT_TEST_UTIL.startQuorum(regionInfo);
-    client = RAFT_TEST_UTIL.getQuorumClient(regionInfo.getQuorumInfo());
+    RAFT_TEST_UTIL.startQuorum(quorumInfo);
+    client = RAFT_TEST_UTIL.getQuorumClient(quorumInfo);
   }
 
   @After
@@ -67,8 +67,8 @@ public class TestBasicPeerSeeding {
       LOG.info("Passed the " + i + " commit !");
     }
     // Verify all the logs across the quorum are the same
-    while(!RAFT_TEST_UTIL.verifyLogs(regionInfo.getQuorumInfo(), QUORUM_SIZE)) {
-      RAFT_TEST_UTIL.dumpStates(regionInfo);
+    while(!RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_SIZE)) {
+      RAFT_TEST_UTIL.dumpStates(quorumInfo);
       try {
         // Sleep for MAX_TIMEOUT time for leader election to complete
         Thread.sleep(HConstants.QUORUM_CLIENT_COMMIT_DEADLINE_DEFAULT);
@@ -82,11 +82,11 @@ public class TestBasicPeerSeeding {
 
   private void testSingleCommit() {
     try {
-      RAFT_TEST_UTIL.dumpStates(regionInfo);
+      RAFT_TEST_UTIL.dumpStates(quorumInfo);
       client.replicateCommits(Arrays.asList(generateTestingWALEdit()));
-      RAFT_TEST_UTIL.dumpStates(regionInfo);
+      RAFT_TEST_UTIL.dumpStates(quorumInfo);
       // Verify all the logs across the majority are the same
-      RAFT_TEST_UTIL.verifyLogs(regionInfo.getQuorumInfo(), QUORUM_MAJORITY);
+      RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_MAJORITY);
     } catch (Exception e) {
       LOG.error("Errors: ", e);
       fail("Unexpected exception: e");
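The same mechanical substitution repeats in every test fixture below: the QuorumInfo returned by initializePeers() is threaded through quorum setup, client creation, and log verification. A minimal sketch of the shared scaffold, using only the RaftTestUtil helpers visible in this diff:

    // Shared test scaffold after this patch: everything is keyed by QuorumInfo.
    QuorumInfo quorumInfo = RAFT_TEST_UTIL.initializePeers();
    RAFT_TEST_UTIL.addQuorum(quorumInfo, mockLogs);  // or getScratchSetup(QUORUM_SIZE)
    RAFT_TEST_UTIL.startQuorum(quorumInfo);
    QuorumClient client = RAFT_TEST_UTIL.getQuorumClient(quorumInfo);
    // ... drive commits through client, then:
    RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_SIZE);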
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerSlow.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerSlow.java
index 138c674..7851078 100644
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerSlow.java
+++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicPeerSlow.java
@@ -1,8 +1,8 @@
 package org.apache.hadoop.hbase.consensus;
 
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.consensus.client.QuorumClient;
+import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo;
 import org.apache.hadoop.hbase.consensus.quorum.RaftQuorumContext;
 import org.apache.hadoop.hbase.consensus.server.InstrumentedConsensusServiceImpl;
 import org.junit.After;
@@ -22,7 +22,7 @@ import java.util.concurrent.TimeUnit;
 public class TestBasicPeerSlow {
   private static int QUORUM_SIZE = 5;
   private static int QUORUM_MAJORITY = 3;
-  private static HRegionInfo regionInfo;
+  private static QuorumInfo quorumInfo;
   private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil();
   private static QuorumClient client;
   private static volatile int transactionNums = 0;
@@ -38,10 +38,10 @@ public class TestBasicPeerSlow {
     RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE);
     RAFT_TEST_UTIL.setUsePeristentLog(true);
     RAFT_TEST_UTIL.assertAllServersRunning();
-    regionInfo = RAFT_TEST_UTIL.initializePeers();
-    RAFT_TEST_UTIL.addQuorum(regionInfo, RaftTestUtil.getScratchSetup(QUORUM_SIZE));
-    RAFT_TEST_UTIL.startQuorum(regionInfo);
-    client = RAFT_TEST_UTIL.getQuorumClient(regionInfo.getQuorumInfo());
+    quorumInfo = RAFT_TEST_UTIL.initializePeers();
+    RAFT_TEST_UTIL.addQuorum(quorumInfo, RaftTestUtil.getScratchSetup(QUORUM_SIZE));
+    RAFT_TEST_UTIL.startQuorum(quorumInfo);
+    client = RAFT_TEST_UTIL.getQuorumClient(quorumInfo);
 
     transactionNums = 0;
     stop = false;
@@ -122,8 +122,8 @@ public class TestBasicPeerSlow {
     InstrumentedConsensusServiceImpl.PacketDropStyle style =
         InstrumentedConsensusServiceImpl.PacketDropStyle.values()[event[1]];
 
-    RaftQuorumContext context = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, rank);
-    RAFT_TEST_UTIL.simulatePacketDropForServer(regionInfo, rank, style);
+    RaftQuorumContext context = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, rank);
+    RAFT_TEST_UTIL.simulatePacketDropForServer(quorumInfo, rank, style);
     System.out.println("Set package drop for the quorum: " + context + " as " + style);
   }
 
@@ -148,8 +148,8 @@ public class TestBasicPeerSlow {
     InstrumentedConsensusServiceImpl.PacketDropStyle nodrop =
         InstrumentedConsensusServiceImpl.PacketDropStyle.NONE;
 
-    RaftQuorumContext context = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, rank);
-    RAFT_TEST_UTIL.simulatePacketDropForServer(regionInfo, rank, nodrop);
+    RaftQuorumContext context = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, rank);
+    RAFT_TEST_UTIL.simulatePacketDropForServer(quorumInfo, rank, nodrop);
     System.out.println("Reset package drop for the quorum: " + context + " as " + nodrop);
   }
 
@@ -161,7 +161,7 @@ public class TestBasicPeerSlow {
     makeProgress(sleepTime, transactionNums, true);
 
     // Verify logs are identical across all the quorum members
-    while (!RAFT_TEST_UTIL.verifyLogs(regionInfo.getQuorumInfo(), QUORUM_SIZE)) {
+    while (!RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_SIZE)) {
       Thread.sleep(5 * 1000);
       clientTrafficFrequency = clientTrafficFrequency * 10;
       System.out.println("Verifying logs ....");
@@ -177,13 +177,13 @@ public class TestBasicPeerSlow {
       throws InterruptedException {
     System.out.println("Let the client load fly for " + sleepTime + " ms");
     Thread.sleep(sleepTime);
-    RAFT_TEST_UTIL.printStatusOfQuorum(regionInfo);
+    RAFT_TEST_UTIL.printStatusOfQuorum(quorumInfo);
     int i = 0;
     while ((waitForProgress && transactionNums <= prevLoad) ||
         (!waitForProgress && (++i <= 1))) {
       System.out.println("No Progress ! prev " + prevLoad + " current " + transactionNums);
-      RAFT_TEST_UTIL.printStatusOfQuorum(regionInfo);
+      RAFT_TEST_UTIL.printStatusOfQuorum(quorumInfo);
       Thread.sleep(sleepTime);
     }
 
@@ -206,7 +206,7 @@ public class TestBasicPeerSlow {
         if ((++transactionNums) % progressInterval == 0) {
           System.out.println("Sent " + transactionNums + "transactions to the quorum");
-          RAFT_TEST_UTIL.printStatusOfQuorum(regionInfo);
+          RAFT_TEST_UTIL.printStatusOfQuorum(quorumInfo);
         }
       } catch (Exception e) {
         System.out.print(String.format("Cannot replicate transaction" + e));
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicQuorumCommit.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicQuorumCommit.java
index 1c63a47..bb89597 100644
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicQuorumCommit.java
+++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicQuorumCommit.java
@@ -1,18 +1,14 @@
 package org.apache.hadoop.hbase.consensus;
 
 import org.apache.hadoop.hbase.consensus.client.QuorumClient;
+import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.consensus.quorum.QuorumAgent;
-import org.apache.hadoop.hbase.consensus.quorum.RaftQuorumContext;
-import org.apache.hadoop.hbase.consensus.server.LocalConsensusServer;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -32,7 +28,7 @@ public class TestBasicQuorumCommit {
 
   private static final int QUORUM_SIZE = 5;
   private static final int QUORUM_MAJORITY = 3;
-  private static HRegionInfo regionInfo;
+  private static QuorumInfo quorumInfo;
   private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil();
   private final List mockLogs;
   private QuorumClient client;
@@ -42,10 +38,10 @@ public class TestBasicQuorumCommit {
     RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE);
     RAFT_TEST_UTIL.assertAllServersRunning();
     RAFT_TEST_UTIL.setUsePeristentLog(true);
-    regionInfo = RAFT_TEST_UTIL.initializePeers();
-    RAFT_TEST_UTIL.addQuorum(regionInfo, mockLogs);
-    RAFT_TEST_UTIL.startQuorum(regionInfo);
-    client = RAFT_TEST_UTIL.getQuorumClient(regionInfo.getQuorumInfo());
+    quorumInfo = RAFT_TEST_UTIL.initializePeers();
+    RAFT_TEST_UTIL.addQuorum(quorumInfo, mockLogs);
+    RAFT_TEST_UTIL.startQuorum(quorumInfo);
+    client = RAFT_TEST_UTIL.getQuorumClient(quorumInfo);
   }
 
   @After
@@ -69,8 +65,8 @@ public class TestBasicQuorumCommit {
       LOG.info("Passed the " + i + " commit !");
     }
     // Verify all the logs across the quorum are the same
-    while(!RAFT_TEST_UTIL.verifyLogs(regionInfo.getQuorumInfo(), QUORUM_SIZE)) {
-      RAFT_TEST_UTIL.dumpStates(regionInfo);
+    while(!RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_SIZE)) {
+      RAFT_TEST_UTIL.dumpStates(quorumInfo);
       try {
         // Sleep for MAX_TIMEOUT time for leader election to complete
         Thread.sleep(HConstants.QUORUM_CLIENT_COMMIT_DEADLINE_DEFAULT);
@@ -83,11 +79,11 @@ public class TestBasicQuorumCommit {
 
   private void testSingleCommit() {
     try {
-      RAFT_TEST_UTIL.dumpStates(regionInfo);
+      RAFT_TEST_UTIL.dumpStates(quorumInfo);
       client.replicateCommits(Arrays.asList(generateTestingWALEdit()));
-      RAFT_TEST_UTIL.dumpStates(regionInfo);
+      RAFT_TEST_UTIL.dumpStates(quorumInfo);
       // Verify all the logs across the majority are the same
-      RAFT_TEST_UTIL.verifyLogs(regionInfo.getQuorumInfo(), QUORUM_MAJORITY);
+      RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_MAJORITY);
     } catch (Exception e) {
       LOG.error("Errors: ", e);
       fail("Unexpected exception: " + e.getMessage());
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicQuorumMembershipChange.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicQuorumMembershipChange.java
index e75e4af..1d33457 100644
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicQuorumMembershipChange.java
+++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicQuorumMembershipChange.java
@@ -12,7 +12,6 @@ import org.junit.runner.RunWith;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.consensus.client.QuorumClient;
 import org.junit.After;
 import org.junit.Before;
@@ -29,7 +28,7 @@ import java.util.concurrent.ThreadPoolExecutor;
       TestBasicPeerFailure.class);
   private static int QUORUM_SIZE = 5;
   private static int QUORUM_MAJORITY = 3;
-  private static HRegionInfo regionInfo;
+  private static QuorumInfo quorumInfo;
   private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil();
   private static QuorumClient client;
   private static volatile int transactionNums = 0;
@@ -46,12 +45,12 @@ import java.util.concurrent.ThreadPoolExecutor;
     RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE);
     RAFT_TEST_UTIL.setUsePeristentLog(true);
     RAFT_TEST_UTIL.assertAllServersRunning();
-    regionInfo = RAFT_TEST_UTIL.initializePeers();
-    RAFT_TEST_UTIL.addQuorum(regionInfo, null);
-    RAFT_TEST_UTIL.startQuorum(regionInfo);
-    client = RAFT_TEST_UTIL.getQuorumClient(regionInfo.getQuorumInfo());
+    quorumInfo = RAFT_TEST_UTIL.initializePeers();
+    RAFT_TEST_UTIL.addQuorum(quorumInfo, null);
+    RAFT_TEST_UTIL.startQuorum(quorumInfo);
+    client = RAFT_TEST_UTIL.getQuorumClient(quorumInfo);
 
-    loader = new ReplicationLoadForUnitTest(regionInfo, client, RAFT_TEST_UTIL,
+    loader = new ReplicationLoadForUnitTest(quorumInfo, client, RAFT_TEST_UTIL,
       QUORUM_SIZE, QUORUM_MAJORITY);
 
     transactionNums = 0;
@@ -104,7 +103,7 @@ import java.util.concurrent.ThreadPoolExecutor;
     // Get the new config
     QuorumInfo newConfig = this.createNewQuorumInfo(newPorts);
-    LOG.debug("Old Config " + regionInfo.getPeersWithRank());
+    LOG.debug("Old Config " + quorumInfo.getPeersWithRank());
     LOG.debug("New Config " + newConfig.getPeersWithRank());
 
@@ -116,7 +115,7 @@ import java.util.concurrent.ThreadPoolExecutor;
     // same time.
     if (numPeersToChange >= this.QUORUM_MAJORITY) {
       Assert.assertFalse(client.changeQuorum(newConfig));
-      newConfig = regionInfo.getQuorumInfo();
+      newConfig = quorumInfo;
     } else {
       Assert.assertTrue(client.changeQuorum(newConfig));
       // Tell the quorum client about the new config
@@ -157,7 +156,7 @@ import java.util.concurrent.ThreadPoolExecutor;
 
         // Start the raft protocol on the server
         server.getHandler().getRaftQuorumContext(
-          regionInfo.getQuorumInfo().getQuorumName()).initializeAll(
+          quorumInfo.getQuorumName()).initializeAll(
             HConstants.UNDEFINED_TERM_INDEX);
       }
     }
@@ -167,14 +166,14 @@ import java.util.concurrent.ThreadPoolExecutor;
 
     // Make a copy
-    QuorumInfo info = new QuorumInfo(regionInfo.getQuorumInfo());
+    QuorumInfo info = new QuorumInfo(quorumInfo);
     boolean leaderReplaced = false;
     List<HServerAddress> peerToReplaceAddr;
     peerToReplaceAddr = new ArrayList<>();
     List<Pair<HServerAddress, Integer>> newServers = new ArrayList<>();
     Map<HServerAddress, Integer> currentPeers =
-      info.getPeers().get(HRegionInfo.LOCAL_DC_KEY);
+      info.getPeers().get(QuorumInfo.LOCAL_DC_KEY);
     HServerAddress oldPeer, newPeer;
 
     for (int newServerPort : ports) {
@@ -202,12 +201,12 @@ import java.util.concurrent.ThreadPoolExecutor;
     }
 
     // Make sure we actually removed the required number of peers
-    Assert.assertTrue(info.getPeers().get(HRegionInfo.LOCAL_DC_KEY).size() ==
+    Assert.assertTrue(info.getPeers().get(QuorumInfo.LOCAL_DC_KEY).size() ==
       QUORUM_SIZE - ports.length);
 
     for (Pair server : newServers) {
       // Update the config
-      info.getPeers().get(HRegionInfo.LOCAL_DC_KEY).put(server.getFirst(),
+      info.getPeers().get(QuorumInfo.LOCAL_DC_KEY).put(server.getFirst(),
         server.getSecond());
     }
     info.refresh();
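With LOCAL_DC_KEY moved from HRegionInfo onto QuorumInfo, a quorum config can be copied and mutated without touching region metadata at all. A sketch of the shape of createNewQuorumInfo() above, where newPeer and rank stand in for the real test values:

    // Build a replacement quorum config purely from QuorumInfo.
    QuorumInfo info = new QuorumInfo(quorumInfo);  // copy the current config
    Map<HServerAddress, Integer> peers =
        info.getPeers().get(QuorumInfo.LOCAL_DC_KEY);
    peers.put(newPeer, rank);   // swap or add peers in the local cell
    info.refresh();             // recompute derived state before use
    client.changeQuorum(info);  // propose the new membership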
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicSeedCommitIndex.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicSeedCommitIndex.java
index e7c8644..82c7ed9 100644
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicSeedCommitIndex.java
+++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestBasicSeedCommitIndex.java
@@ -1,27 +1,17 @@
 package org.apache.hadoop.hbase.consensus;
 
-import static junit.framework.Assert.fail;
-
 import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collection;
 import java.util.List;
 
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.consensus.client.QuorumClient;
+import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo;
 import org.apache.hadoop.hbase.consensus.quorum.RaftQuorumContext;
 import org.apache.hadoop.hbase.consensus.server.LocalConsensusServer;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -31,7 +21,7 @@ public class TestBasicSeedCommitIndex {
 
   private static final int QUORUM_SIZE = 5;
   private static final int QUORUM_MAJORITY = 3;
-  private static HRegionInfo regionInfo;
+  private static QuorumInfo quorumInfo;
   private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil();
   private final long seedIndex = 100;
 
@@ -42,10 +32,10 @@ public class TestBasicSeedCommitIndex {
     RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE);
     RAFT_TEST_UTIL.assertAllServersRunning();
     RAFT_TEST_UTIL.setUsePeristentLog(true);
-    regionInfo = RAFT_TEST_UTIL.initializePeers();
-    RAFT_TEST_UTIL.addQuorum(regionInfo, null);
+    quorumInfo = RAFT_TEST_UTIL.initializePeers();
+    RAFT_TEST_UTIL.addQuorum(quorumInfo, null);
     RAFT_TEST_UTIL.setSeedIndex(seedIndex);
-    RAFT_TEST_UTIL.startQuorum(regionInfo);
+    RAFT_TEST_UTIL.startQuorum(quorumInfo);
   }
 
@@ -59,14 +49,14 @@ public class TestBasicSeedCommitIndex {
 
     // Wait for leader
-    RaftQuorumContext c5 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 5);
+    RaftQuorumContext c5 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 5);
     while (!c5.isLeader()) {
       Threads.sleep(1000);
     }
 
     // Wait till we purge the seed log file
     int count = 0;
-    List<RaftQuorumContext> peers = RAFT_TEST_UTIL.getQuorumContexts(regionInfo);
+    List<RaftQuorumContext> peers = RAFT_TEST_UTIL.getQuorumContexts(quorumInfo);
     while (count != QUORUM_SIZE) {
       Threads.sleep(1000);
@@ -76,7 +66,7 @@ public class TestBasicSeedCommitIndex {
         ++count;
       }
     }
-    RAFT_TEST_UTIL.dumpStates(regionInfo);
+    RAFT_TEST_UTIL.dumpStates(quorumInfo);
   }
 
   // At this point the state should
@@ -87,16 +77,16 @@
   // [rank: 1] ; FOLLOWER ; { Uncommitted [101, 101] }
 
   // Let's stop the leader
-    LocalConsensusServer s5 = RAFT_TEST_UTIL.stopLocalConsensusServer(regionInfo,
+    LocalConsensusServer s5 = RAFT_TEST_UTIL.stopLocalConsensusServer(quorumInfo,
       5);
 
-    RaftQuorumContext c4 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 4);
+    RaftQuorumContext c4 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 4);
     while (!c4.isLeader()) {
       Threads.sleep(1000);
     }
 
-    c5 = RAFT_TEST_UTIL.restartLocalConsensusServer(s5, regionInfo,
+    c5 = RAFT_TEST_UTIL.restartLocalConsensusServer(s5, quorumInfo,
       c5.getMyAddress());
 
     while (!c5.isLeader()) {
@@ -105,7 +95,7 @@ public class TestBasicSeedCommitIndex {
 
     // Wait for logs to be verified
     // Verify logs are identical across all the quorum members
-    while (!RAFT_TEST_UTIL.verifyLogs(regionInfo.getQuorumInfo(), QUORUM_SIZE)) {
+    while (!RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_SIZE)) {
      Threads.sleep(1000);
       System.out.println("Verifying logs ....");
       Assert
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestCommitDeadline.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestCommitDeadline.java
index 883a2ab..cf5015f 100644
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestCommitDeadline.java
+++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestCommitDeadline.java
@@ -2,9 +2,9 @@ package org.apache.hadoop.hbase.consensus;
 
 import com.google.common.base.Stopwatch;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.consensus.client.QuorumClient;
 import org.apache.hadoop.hbase.consensus.client.QuorumThriftClientAgent;
+import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -19,7 +19,7 @@ public class TestCommitDeadline {
   private static int NUM_REPLICAS = 3;
   private static int QUORUM_SIZE = 2;
 
-  private HRegionInfo regionInfo;
+  private QuorumInfo quorumInfo;
   private QuorumClient client;
   private QuorumThriftClientAgent leader;
 
@@ -34,14 +34,14 @@ public class TestCommitDeadline {
     // A successful commit should complete within the set deadline.
     Stopwatch stopwatch = new Stopwatch(); //Stopwatch.createStarted();
-    assertTrue(leader.replicateCommit(regionInfo.getEncodedName(),
+    assertTrue(leader.replicateCommit(quorumInfo.getQuorumName(),
       RAFT_TEST_UTIL.generateTransaction(1024)) > 0);
     assertTrue("The commit should complete within the deadline",
       stopwatch.elapsedTime(TimeUnit.MILLISECONDS) < deadline);
 
     // Stop the majority of the replicas. The leader should remain leader.
     for (int i = 0; i < QUORUM_SIZE; i++) {
-      RAFT_TEST_UTIL.stopLocalConsensusServer(regionInfo, i + 1);
+      RAFT_TEST_UTIL.stopLocalConsensusServer(quorumInfo, i + 1);
     }
     leader = client.getLeader();
     assertNotNull(leader);
@@ -50,7 +50,7 @@ public class TestCommitDeadline {
     stopwatch.reset().start();
     Exception expectedException = null;
     try {
-      leader.replicateCommit(regionInfo.getEncodedName(),
+      leader.replicateCommit(quorumInfo.getQuorumName(),
         RAFT_TEST_UTIL.generateTransaction(1024));
     } catch (Exception e) {
       expectedException = e;
@@ -69,13 +69,13 @@ public class TestCommitDeadline {
     RAFT_TEST_UTIL.assertAllServersRunning();
     RAFT_TEST_UTIL.setUsePeristentLog(true);
 
-    regionInfo = RAFT_TEST_UTIL.initializePeers();
+    quorumInfo = RAFT_TEST_UTIL.initializePeers();
 
-    RAFT_TEST_UTIL.addQuorum(regionInfo,
-      RAFT_TEST_UTIL.getScratchSetup(NUM_REPLICAS));
-    RAFT_TEST_UTIL.startQuorum(regionInfo);
+    RAFT_TEST_UTIL.addQuorum(quorumInfo,
+      RAFT_TEST_UTIL.getScratchSetup(NUM_REPLICAS));
+    RAFT_TEST_UTIL.startQuorum(quorumInfo);
 
-    client = RAFT_TEST_UTIL.getQuorumClient(regionInfo.getQuorumInfo());
+    client = RAFT_TEST_UTIL.getQuorumClient(quorumInfo);
   }
 
   @After
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestLowerRankBecomingLeader.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestLowerRankBecomingLeader.java
index a72ebf9..94d3222 100644
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestLowerRankBecomingLeader.java
+++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestLowerRankBecomingLeader.java
@@ -1,8 +1,8 @@
 package org.apache.hadoop.hbase.consensus;
 
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.consensus.client.QuorumClient;
+import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo;
 import org.apache.hadoop.hbase.consensus.quorum.RaftQuorumContext;
 import org.apache.hadoop.hbase.consensus.server
     .InstrumentedConsensusServiceImpl;
@@ -21,7 +21,7 @@ public class TestLowerRankBecomingLeader {
       TestLowerRankBecomingLeader.class);
   private static int QUORUM_SIZE = 3;
   private static int QUORUM_MAJORITY = 2;
-  private static HRegionInfo regionInfo;
+  private static QuorumInfo quorumInfo;
   private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil();
   private static QuorumClient client;
   private static volatile int transactionNum = 0;
@@ -33,12 +33,12 @@ public class TestLowerRankBecomingLeader {
     RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE);
     RAFT_TEST_UTIL.setUsePeristentLog(true);
     RAFT_TEST_UTIL.assertAllServersRunning();
-    regionInfo = RAFT_TEST_UTIL.initializePeers();
-    RAFT_TEST_UTIL.addQuorum(regionInfo, RAFT_TEST_UTIL.getScratchSetup(QUORUM_SIZE));
-    RAFT_TEST_UTIL.startQuorum(regionInfo);
-    client = RAFT_TEST_UTIL.getQuorumClient(regionInfo.getQuorumInfo());
+    quorumInfo = RAFT_TEST_UTIL.initializePeers();
+    RAFT_TEST_UTIL.addQuorum(quorumInfo, RAFT_TEST_UTIL.getScratchSetup(QUORUM_SIZE));
+    RAFT_TEST_UTIL.startQuorum(quorumInfo);
+    client = RAFT_TEST_UTIL.getQuorumClient(quorumInfo);
     transactionNum = 0;
-    loader = new ReplicationLoadForUnitTest(regionInfo, client, RAFT_TEST_UTIL, QUORUM_SIZE,
+    loader = new ReplicationLoadForUnitTest(quorumInfo, client, RAFT_TEST_UTIL, QUORUM_SIZE,
       QUORUM_MAJORITY);
   }
 
@@ -65,23 +65,23 @@ public class TestLowerRankBecomingLeader {
     // Let the traffic fly for a while
     transactionNum = loader.makeProgress(sleepTime, transactionNum);
-    RAFT_TEST_UTIL.simulatePacketDropForServer(regionInfo, 2,
+    RAFT_TEST_UTIL.simulatePacketDropForServer(quorumInfo, 2,
       InstrumentedConsensusServiceImpl.PacketDropStyle.ALWAYS);
 
     transactionNum = loader.makeProgress(sleepTime, transactionNum);
-    RAFT_TEST_UTIL.simulatePacketDropForServer(regionInfo, 2,
+    RAFT_TEST_UTIL.simulatePacketDropForServer(quorumInfo, 2,
       InstrumentedConsensusServiceImpl.PacketDropStyle.NONE);
 
     // At this point, rank 1 and rank 3 peers are up to date
 
     // Get all the quorum contexts from rank 3 to rank 2
-    RaftQuorumContext c3 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 3);
-    RaftQuorumContext c2 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 2);
+    RaftQuorumContext c3 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 3);
+    RaftQuorumContext c2 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 2);
 
     // Shutdown 1st quorum member whose rank is 3
     System.out.println("Stopping one quorum member: " + c3);
-    LocalConsensusServer s3 = RAFT_TEST_UTIL.stopLocalConsensusServer(regionInfo, 3);
+    LocalConsensusServer s3 = RAFT_TEST_UTIL.stopLocalConsensusServer(quorumInfo, 3);
 
     // Let the traffic fly for a while
     if ((++failureCnt % failureInterval) == 0) {
@@ -94,7 +94,7 @@ public class TestLowerRankBecomingLeader {
     }
 
     // Restart the quorum member whose rank is 3
-    c3 = RAFT_TEST_UTIL.restartLocalConsensusServer(s3, regionInfo, c3.getMyAddress());
+    c3 = RAFT_TEST_UTIL.restartLocalConsensusServer(s3, quorumInfo, c3.getMyAddress());
     System.out.println("Restarted another quorum member: " + c3);
 
     // Let the traffic fly for a while
@@ -110,7 +110,7 @@ public class TestLowerRankBecomingLeader {
     loader.slowDownReplicationLoad();
 
     // Verify logs are identical across all the quorum members
-    while (!RAFT_TEST_UTIL.verifyLogs(regionInfo.getQuorumInfo(), QUORUM_SIZE)) {
+    while (!RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_SIZE)) {
       Thread.sleep(10 * 1000);
       System.out.println("Verifying logs ....");
       Assert.assertTrue("Rank 3 shall be the leader of the quorum", c3.isLeader());
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestPersistLastVotedFor.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestPersistLastVotedFor.java
index 73683bb..347103e 100644
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestPersistLastVotedFor.java
+++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestPersistLastVotedFor.java
@@ -1,8 +1,8 @@
 package org.apache.hadoop.hbase.consensus;
 
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.consensus.client.QuorumClient;
 import org.apache.hadoop.hbase.consensus.protocol.ConsensusHost;
+import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo;
 import org.apache.hadoop.hbase.consensus.quorum.RaftQuorumContext;
 import org.apache.hadoop.hbase.consensus.server.LocalConsensusServer;
 import org.junit.After;
@@ -17,7 +17,7 @@ import java.io.IOException;
 public class TestPersistLastVotedFor {
   private static int QUORUM_SIZE = 3;
   private static int QUORUM_MAJORITY = 2;
-  private static HRegionInfo regionInfo;
+  private static QuorumInfo quorumInfo;
   private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil();
   private static QuorumClient client;
   private static volatile int transactionNum = 0;
@@ -29,13 +29,13 @@ public class TestPersistLastVotedFor {
     RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE);
     RAFT_TEST_UTIL.setUsePeristentLog(true);
     RAFT_TEST_UTIL.assertAllServersRunning();
-    regionInfo = RAFT_TEST_UTIL.initializePeers();
-    RAFT_TEST_UTIL.addQuorum(regionInfo, RAFT_TEST_UTIL.getScratchSetup(QUORUM_SIZE));
-    RAFT_TEST_UTIL.startQuorum(regionInfo);
+    quorumInfo = RAFT_TEST_UTIL.initializePeers();
+    RAFT_TEST_UTIL.addQuorum(quorumInfo, RAFT_TEST_UTIL.getScratchSetup(QUORUM_SIZE));
+    RAFT_TEST_UTIL.startQuorum(quorumInfo);
 
-    client = RAFT_TEST_UTIL.getQuorumClient(regionInfo.getQuorumInfo());
+    client = RAFT_TEST_UTIL.getQuorumClient(quorumInfo);
     transactionNum = 0;
-    loader = new ReplicationLoadForUnitTest(regionInfo, client, RAFT_TEST_UTIL, QUORUM_SIZE,
+    loader = new ReplicationLoadForUnitTest(quorumInfo, client, RAFT_TEST_UTIL, QUORUM_SIZE,
       QUORUM_MAJORITY);
   }
 
@@ -52,7 +52,7 @@ public class TestPersistLastVotedFor {
     loader.stopReplicationLoad();
 
     RaftQuorumContext leader =
-      RAFT_TEST_UTIL.getLeaderQuorumContext(regionInfo);
+      RAFT_TEST_UTIL.getLeaderQuorumContext(quorumInfo);
     // What is the current lastVotedFor
     ConsensusHost initialLastVotedFor = leader.getLastVotedFor();
 
@@ -61,7 +61,7 @@ public class TestPersistLastVotedFor {
       RAFT_TEST_UTIL.stopLocalConsensusServer(leader.getMyAddress());
 
     RaftQuorumContext newQuorumContext =
-      RAFT_TEST_UTIL.restartLocalConsensusServer(consensusServer, regionInfo,
+      RAFT_TEST_UTIL.restartLocalConsensusServer(consensusServer, quorumInfo,
         leader.getMyAddress());
     ConsensusHost lastVotedForAsReadFromDisk =
       newQuorumContext.getLastVotedFor();
@@ -73,7 +73,7 @@ public class TestPersistLastVotedFor {
     consensusServer =
       RAFT_TEST_UTIL.stopLocalConsensusServer(newQuorumContext.getMyAddress());
     RaftQuorumContext newQuorumContextAfterSecondRestart =
-      RAFT_TEST_UTIL.restartLocalConsensusServer(consensusServer, regionInfo,
+      RAFT_TEST_UTIL.restartLocalConsensusServer(consensusServer, quorumInfo,
        newQuorumContext.getMyAddress());
 
     ConsensusHost emptyLastVotedFor =
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestRaftEventListener.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestRaftEventListener.java
index 7da8372..414b361 100644
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestRaftEventListener.java
+++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/TestRaftEventListener.java
@@ -1,9 +1,8 @@
 package org.apache.hadoop.hbase.consensus;
 
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.consensus.client.QuorumClient;
-import org.apache.hadoop.hbase.consensus.protocol.EditId;
 import org.apache.hadoop.hbase.consensus.protocol.Payload;
+import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo;
 import org.apache.hadoop.hbase.consensus.quorum.RaftQuorumContext;
 import org.apache.hadoop.hbase.consensus.server.LocalConsensusServer;
 import org.apache.hadoop.hbase.consensus.server.peer.PeerServer;
@@ -12,8 +11,6 @@ import org.apache.hadoop.hbase.regionserver.RaftEventListener;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -26,7 +23,7 @@ public class TestRaftEventListener {
 
   private static int QUORUM_SIZE = 5;
   private static int QUORUM_MAJORITY = 3;
-  private static HRegionInfo regionInfo;
+  private static QuorumInfo quorumInfo;
   private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil();
   private static QuorumClient client;
   private int transactionNum = 0;
@@ -79,18 +76,18 @@ public class TestRaftEventListener {
     RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE);
     RAFT_TEST_UTIL.setUsePeristentLog(true);
     RAFT_TEST_UTIL.assertAllServersRunning();
-    regionInfo = RAFT_TEST_UTIL.initializePeers();
-    RAFT_TEST_UTIL.addQuorum(regionInfo, RAFT_TEST_UTIL.getScratchSetup(QUORUM_SIZE));
-    RAFT_TEST_UTIL.startQuorum(regionInfo);
-    client = RAFT_TEST_UTIL.getQuorumClient(regionInfo.getQuorumInfo());
+    quorumInfo = RAFT_TEST_UTIL.initializePeers();
+    RAFT_TEST_UTIL.addQuorum(quorumInfo, RAFT_TEST_UTIL.getScratchSetup(QUORUM_SIZE));
+    RAFT_TEST_UTIL.startQuorum(quorumInfo);
+    client = RAFT_TEST_UTIL.getQuorumClient(quorumInfo);
 
     // Register the listener for the highest rank, which is equal to QUORUM_SIZE;
     for (Map.Entry<HServerAddress, PeerServer> entry :
-      RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, QUORUM_SIZE).getPeerServers().entrySet()) {
+      RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, QUORUM_SIZE).getPeerServers().entrySet()) {
       entry.getValue().registerDataStoreEventListener(listener);
     }
-    loader = new ReplicationLoadForUnitTest(regionInfo, client, RAFT_TEST_UTIL, QUORUM_SIZE,
+    loader = new ReplicationLoadForUnitTest(quorumInfo, client, RAFT_TEST_UTIL, QUORUM_SIZE,
      QUORUM_MAJORITY);
   }
 
@@ -109,16 +106,16 @@ public class TestRaftEventListener {
     transactionNum = loader.makeProgress(1000, transactionNum);
 
     // Stop the replica whose rank is 4
-    RaftQuorumContext c4 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 4);
+    RaftQuorumContext c4 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 4);
     System.out.println("Stopping one quorum member: " + c4);
-    LocalConsensusServer s4 = RAFT_TEST_UTIL.stopLocalConsensusServer(regionInfo, 4);
+    LocalConsensusServer s4 = RAFT_TEST_UTIL.stopLocalConsensusServer(quorumInfo, 4);
 
     // Sleep for 1 sec
     transactionNum = loader.makeProgress(2000, transactionNum);
     assertEquals(1, unavailablePeerSet.size());
 
     // Start the replica whose rank is 4
-    RAFT_TEST_UTIL.restartLocalConsensusServer(s4, regionInfo, c4.getMyAddress());
+    RAFT_TEST_UTIL.restartLocalConsensusServer(s4, quorumInfo, c4.getMyAddress());
     System.out.println("Restarted one quorum member: " + c4);
 
     // Sleep for 5 sec
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/fsm/TestAsyncStatesInRaftStateMachine.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/fsm/TestAsyncStatesInRaftStateMachine.java
index 17d7ace..4a73bdc 100644
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/fsm/TestAsyncStatesInRaftStateMachine.java
+++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/fsm/TestAsyncStatesInRaftStateMachine.java
@@ -3,16 +3,14 @@ package org.apache.hadoop.hbase.consensus.fsm;
 
 import com.google.common.util.concurrent.SettableFuture;
 import junit.framework.Assert;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.consensus.RaftTestUtil;
 import org.apache.hadoop.hbase.consensus.client.QuorumClient;
+import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo;
 import org.apache.hadoop.hbase.consensus.quorum.RaftQuorumContext;
 import org.apache.hadoop.hbase.consensus.raft.states.RaftStateType;
 import org.apache.hadoop.hbase.consensus.server.LocalConsensusServer;
-import org.apache.hadoop.hbase.consensus.server.peer.AbstractPeer;
 import org.apache.hadoop.hbase.consensus.server.peer.PeerServer;
-import org.apache.hadoop.hbase.consensus.server.peer.states.PeerServerStateType;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
@@ -40,7 +38,7 @@ public class TestAsyncStatesInRaftStateMachine {
 
   private static final int QUORUM_SIZE = 5;
   private static final int QUORUM_MAJORITY = 3;
-  private static HRegionInfo regionInfo;
+  private static QuorumInfo quorumInfo;
   private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil();
   private Configuration conf;
   private QuorumClient client;
@@ -100,10 +98,10 @@ public class TestAsyncStatesInRaftStateMachine {
     RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE);
     RAFT_TEST_UTIL.assertAllServersRunning();
     RAFT_TEST_UTIL.setUsePeristentLog(true);
-    regionInfo = RAFT_TEST_UTIL.initializePeers();
-    RAFT_TEST_UTIL.addQuorum(regionInfo, null);
-    RAFT_TEST_UTIL.startQuorum(regionInfo);
-    client = RAFT_TEST_UTIL.getQuorumClient(regionInfo.getQuorumInfo());
+    quorumInfo = RAFT_TEST_UTIL.initializePeers();
+    RAFT_TEST_UTIL.addQuorum(quorumInfo, null);
+    RAFT_TEST_UTIL.startQuorum(quorumInfo);
+    client = RAFT_TEST_UTIL.getQuorumClient(quorumInfo);
   }
 
   @After
@@ -129,7 +127,7 @@ public class TestAsyncStatesInRaftStateMachine {
       // the peer with the lowest timeout will try to write the
       // votedFor and should get stuck.
       RaftQuorumContext r = RAFT_TEST_UTIL.getRaftQuorumContextByRank(
-        regionInfo, 5);
+        quorumInfo, 5);
       assertEquals(RaftStateType.SEND_VOTE_REQUEST,
         r.getCurrentRaftState().getStateType());
       throw e;
@@ -143,26 +141,26 @@ public class TestAsyncStatesInRaftStateMachine {
     testReplicatingCommits(1);
 
     RaftQuorumContext leader =
-      RAFT_TEST_UTIL.getLeaderQuorumContext(regionInfo);
+      RAFT_TEST_UTIL.getLeaderQuorumContext(quorumInfo);
 
     // Stop the peer with rank = 1.
     RaftQuorumContext peer =
-      RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 1);
+      RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 1);
     PeerServer peerServer = leader.getPeerServers().get(peer.getMyAddress());
     LocalConsensusServer peerConsensusServer =
-      RAFT_TEST_UTIL.stopLocalConsensusServer(regionInfo, 1);
+      RAFT_TEST_UTIL.stopLocalConsensusServer(quorumInfo, 1);
 
     // Replicate some other commits, the dead server will miss out.
     testReplicatingCommits(10);
 
     // Restart that dead server.
     RAFT_TEST_UTIL.restartLocalConsensusServer(peerConsensusServer,
-      regionInfo, peer.getMyAddress());
+      quorumInfo, peer.getMyAddress());
 
     // Wait for dead server to come back
     long start = System.currentTimeMillis();
-    while (!RAFT_TEST_UTIL.verifyLogs(regionInfo.getQuorumInfo(), QUORUM_SIZE, true) &&
+    while (!RAFT_TEST_UTIL.verifyLogs(quorumInfo, QUORUM_SIZE, true) &&
       !blockOnHandleAppendResponse) {
       Thread.sleep(1000);
       // stop if we waited for more than 10 seconds
@@ -218,17 +216,17 @@ public class TestAsyncStatesInRaftStateMachine {
 
   private void testReplicatingCommits(int numCommits) {
     try {
-      RAFT_TEST_UTIL.waitForLeader(regionInfo);
+      RAFT_TEST_UTIL.waitForLeader(quorumInfo);
       RaftQuorumContext leader =
-        RAFT_TEST_UTIL.getLeaderQuorumContext(regionInfo);
+        RAFT_TEST_UTIL.getLeaderQuorumContext(quorumInfo);
       Assert.assertNotNull(leader);
-      RAFT_TEST_UTIL.dumpStates(regionInfo);
+      RAFT_TEST_UTIL.dumpStates(quorumInfo);
       for (int i = 0; i < numCommits; i++) {
         client.replicateCommits(Arrays.asList(generateTestingWALEdit()));
       }
-      RAFT_TEST_UTIL.dumpStates(regionInfo);
+      RAFT_TEST_UTIL.dumpStates(quorumInfo);
     } catch (Exception e) {
       LOG.error("Errors: ", e);
       fail("Unexpected exception: " + e);
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/log/TestRemoteLogFetcher.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/log/TestRemoteLogFetcher.java
index 0605222..1738a5e 100644
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/log/TestRemoteLogFetcher.java
+++ b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/log/TestRemoteLogFetcher.java
@@ -1,10 +1,10 @@
 package org.apache.hadoop.hbase.consensus.log;
 
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.consensus.RaftTestUtil;
 import org.apache.hadoop.hbase.consensus.ReplicationLoadForUnitTest;
 import org.apache.hadoop.hbase.consensus.client.FetchTask;
 import org.apache.hadoop.hbase.consensus.client.QuorumClient;
+import org.apache.hadoop.hbase.consensus.quorum.QuorumInfo;
 import org.apache.hadoop.hbase.consensus.quorum.RaftQuorumContext;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.After;
@@ -27,7 +27,7 @@ public class TestRemoteLogFetcher {
   private static final Logger LOG = LoggerFactory.getLogger(TestRemoteLogFetcher.class);
   private static int QUORUM_SIZE = 3;
   private static int QUORUM_MAJORITY = 2;
-  private static HRegionInfo regionInfo;
+  private static QuorumInfo quorumInfo;
   private static RaftTestUtil RAFT_TEST_UTIL = new RaftTestUtil();
   private static QuorumClient client;
   private static volatile int transactionNum = 0;
@@ -39,12 +39,12 @@ public class TestRemoteLogFetcher {
     RAFT_TEST_UTIL.createRaftCluster(QUORUM_SIZE);
     RAFT_TEST_UTIL.setUsePeristentLog(true);
     RAFT_TEST_UTIL.assertAllServersRunning();
-    regionInfo = RAFT_TEST_UTIL.initializePeers();
-    RAFT_TEST_UTIL.addQuorum(regionInfo, RAFT_TEST_UTIL.getScratchSetup(QUORUM_SIZE));
-    RAFT_TEST_UTIL.startQuorum(regionInfo);
-    client = RAFT_TEST_UTIL.getQuorumClient(regionInfo.getQuorumInfo());
+    quorumInfo = RAFT_TEST_UTIL.initializePeers();
+    RAFT_TEST_UTIL.addQuorum(quorumInfo, RAFT_TEST_UTIL.getScratchSetup(QUORUM_SIZE));
+    RAFT_TEST_UTIL.startQuorum(quorumInfo);
+    client = RAFT_TEST_UTIL.getQuorumClient(quorumInfo);
     transactionNum = 0;
-    loader = new ReplicationLoadForUnitTest(regionInfo, client, RAFT_TEST_UTIL, QUORUM_SIZE,
+    loader = new ReplicationLoadForUnitTest(quorumInfo, client, RAFT_TEST_UTIL, QUORUM_SIZE,
       QUORUM_MAJORITY);
   }
 
@@ -55,9 +55,9 @@ public class TestRemoteLogFetcher {
 
   @Test(timeout=60000)
   public void testLogFileStatusRetrieval() throws Exception {
-    RaftQuorumContext c3 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 3);
-    RaftQuorumContext c2 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 2);
-    RaftQuorumContext c1 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(regionInfo, 1);
+    RaftQuorumContext c3 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 3);
+    RaftQuorumContext c2 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 2);
+    RaftQuorumContext c1 = RAFT_TEST_UTIL.getRaftQuorumContextByRank(quorumInfo, 1);
     TransactionLogManager l3 = (TransactionLogManager)c3.getLogManager();
 
     // Around 60 indices per log file on peer 3
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestParser.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestParser.java
deleted file mode 100644
index 334fe09..0000000
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestParser.java
+++ /dev/null
@@ -1,93 +0,0 @@
-package org.apache.hadoop.hbase.consensus.rmap;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.json.JSONException;
-import org.json.JSONObject;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-public class TestParser {
-  Configuration conf;
-  Parser parser;
-  JSONObject rmapAsJSON;
-
-  @Before
-  public void setUp() throws IOException, JSONException {
-    conf = HBaseConfiguration.create();
-    conf.set(HConstants.HYDRABASE_DCNAME, "DUMMYCLUSTER1");
-
-    parser = new Parser(conf);
-    rmapAsJSON = new JSONObject(new String(Files.readAllBytes(
-      Paths.get(getClass().getResource("rmap.json").getPath()))));
-  }
-
-  @Test
-  public void testParseRMap() throws IOException, JSONException {
-    List<HRegionInfo> regions = parser.parseTable("RPCBenchmarkingTable",
-      getTableObjectFromJSON("RPCBenchmarkingTable"));
-    assertEquals(3, regions.size());
-    HRegionInfo region = regions.get(0);
-    HTableDescriptor table = region.getTableDesc();
-    assertEquals("RPCBenchmarkingTable", table.getNameAsString());
-    assertFalse(table.isMetaTable());
-    assertFalse(table.isRootRegion());
-    HColumnDescriptor[] columnFamilies = table.getColumnFamilies();
-    assertEquals(1, columnFamilies.length);
-    HColumnDescriptor cf0 = columnFamilies[0];
-    assertEquals("cf", cf0.getNameAsString());
-    assertEquals("true", cf0.getValue("BLOCKCACHE"));
-    assertEquals("65536", cf0.getValue("BLOCKSIZE"));
-    assertEquals("NONE", cf0.getValue("BLOOMFILTER"));
-    assertEquals("0.01", cf0.getValue("BLOOMFILTER_ERRORRATE"));
-    assertEquals("NONE", cf0.getValue("COMPRESSION"));
-    assertEquals("NONE", cf0.getValue("DATA_BLOCK_ENCODING"));
-    assertEquals("true", cf0.getValue("ENCODE_ON_DISK"));
-    assertEquals("false", cf0.getValue("IN_MEMORY"));
-    assertEquals("0", cf0.getValue("REPLICATION_SCOPE"));
-    assertEquals("2147483647", cf0.getValue("TTL"));
-    assertEquals("2147483647", cf0.getValue("VERSIONS"));
-
-    assertEquals("aeeb54dc6fbca609443bd35796b59da5", region.getEncodedName());
-    assertEquals("", Bytes.toString(region.getStartKey()));
-    assertEquals("2aaaaaaa", Bytes.toString(region.getEndKey()));
-    assertEquals(1373324048180L, region.getRegionId());
-
-    InetSocketAddress[] favoredNodes =
-      region.getFavoredNodesMap().get("DUMMYCLUSTER1");
-    assertEquals(3, favoredNodes.length);
-    assertEquals(new InetSocketAddress("10.159.9.49", 60020), favoredNodes[0]);
-    assertEquals(new InetSocketAddress("10.159.9.45", 60020), favoredNodes[1]);
-    assertEquals(new InetSocketAddress("10.159.9.47", 60020), favoredNodes[2]);
-
-    Map<String, Map<HServerAddress, Integer>> peers = region.getPeers();
-    assertEquals(1, peers.size());
-    Map<HServerAddress, Integer> peersWithRank = region.getPeersWithRank();
-    assertEquals(3, peersWithRank.size());
-    assertEquals(new Integer(1),
-      peersWithRank.get(new HServerAddress("10.159.9.41:60020")));
-    assertEquals(new Integer(2),
-      peersWithRank.get(new HServerAddress("10.159.9.45:60020")));
-    assertEquals(new Integer(3),
-      peersWithRank.get(new HServerAddress("10.159.9.47:60020")));
-    assertEquals(peers.get("DUMMYCLUSTER1"), peersWithRank);
-
-    assertEquals(null, peersWithRank.get(new HServerAddress("1.1.1.1:11111")));
-  }
-
-  private JSONObject getTableObjectFromJSON(final String name)
-      throws JSONException {
-    return rmapAsJSON.getJSONObject("tables").getJSONObject(name);
-  }
-}
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapConfiguration.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapConfiguration.java
deleted file mode 100644
index 4c22dc1..0000000
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapConfiguration.java
+++ /dev/null
@@ -1,55 +0,0 @@
-package org.apache.hadoop.hbase.consensus.rmap;
-
-import static org.junit.Assert.*;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.junit.Before;
-import org.junit.Test;
-
-public class TestRMapConfiguration {
-  private Configuration conf;
-  private RMapConfiguration rmapConf;
-
-  @Before
-  public void setUp() throws Exception {
-    conf = HBaseConfiguration.create();
-    conf.set(HConstants.RMAP_SUBSCRIPTION,
-      getClass().getResource("rmap.json").toURI().toString());
-    conf.set(HConstants.HYDRABASE_DCNAME, "DUMMYCLUSTER1");
-
-    rmapConf = new RMapConfiguration(conf);
-    URI uri = RMapConfiguration.getRMapSubscription(conf);
-    rmapConf.readRMap(uri);
-    rmapConf.appliedRMap(uri);
-  }
-
-  @Test
-  public void testReadingEmptyRMapSubscription() {
-    conf.set(HConstants.RMAP_SUBSCRIPTION, "");
-    assertNull("RMap subscription should be empty",
-      rmapConf.getRMapSubscription(conf));
-  }
-
-  @Test
-  public void testReadingNonEmptyRMapSubscription()
-      throws URISyntaxException {
-    conf.set(HConstants.RMAP_SUBSCRIPTION,
-      "hbase/rmaps/map1");
-    URI expectedRMapSubscription = new URI("hbase/rmaps/map1");
-    assertEquals(expectedRMapSubscription,
-      rmapConf.getRMapSubscription(conf));
-  }
-
-  @Test
-  public void shouldApplyRMap() {
-    URI subscription = RMapConfiguration.getRMapSubscription(conf);
-    assertTrue(rmapConf.isRMapApplied(subscription));
-  }
-}
diff --git a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapReader.java b/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapReader.java
deleted file mode 100644
index 4e7a798..0000000
--- a/hbase-consensus/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapReader.java
+++ /dev/null
@@ -1,102 +0,0 @@
-package org.apache.hadoop.hbase.consensus.rmap;
-
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.security.NoSuchAlgorithmException;
-
-import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-public class TestRMapReader {
-  static String base = "file:/some/rmap.json";
-
-  RMapReader reader;
-  URI current, next, v2, v3;
-
-  @Test
-  public void shouldReturnVersionedURI() throws URISyntaxException {
-    assertEquals(v2, RMapReader.getVersionedURI(base, 2));
-  }
-
-  @Test
-  public void shouldReturnCurrentVersionedURI() throws URISyntaxException {
-    assertEquals(current, RMapReader.getVersionedURI(base, RMapReader.CURRENT));
-  }
-
-  @Test
-  public void shouldReturnNextVersionedURI() throws URISyntaxException {
-    assertEquals(next, RMapReader.getVersionedURI(base, RMapReader.NEXT));
-  }
-
-  @Test
-  public void shouldReturnAbsoluteVersion() throws URISyntaxException {
-    assertEquals(2, RMapReader.getVersion(v2));
-  }
-
-  @Test
-  public void shouldReturnCurrentSymbolicVersion() throws URISyntaxException {
-    assertEquals(RMapReader.CURRENT, RMapReader.getVersion(current));
-  }
-
-  @Test
-  public void shouldReturnNextSymbolicVersion() throws URISyntaxException {
-    assertEquals(RMapReader.NEXT, RMapReader.getVersion(next));
-  }
-
-  @Test
-  public void shouldReturnUnknownSymbolicVersion() throws URISyntaxException {
-    assertEquals(RMapReader.UNKNOWN,
-      RMapReader.getVersion(new URI(base + "?version=FOO")));
-  }
-
-  @Test
-  public void shouldResolveSymbolicVersionAndReturnRMap()
-      throws URISyntaxException, IOException, RMapException {
-    // Stub the abstract methods and forward call to RMapReader.readRMap().
-    // This is a bit frowned upon.
-    when(reader.resolveSymbolicVersion(current)).thenReturn(v2);
-    when(reader.readRMapAsString(v2)).thenReturn("{}");
-    when(reader.readRMap(current)).thenCallRealMethod();
-
-    RMapJSON rmap = reader.readRMap(current);
-    assertEquals(v2, rmap.uri);
-    assertEquals("{}", rmap.rmap.toString());
-  }
-
-  @Test
-  public void shouldReturnMD5HashAsHex() throws NoSuchAlgorithmException {
-    assertEquals("99914b932bd37a50b983c5e7c90ae93b",
-      RMapReader.getSignature("{}"));
-  }
-
-  @Test
-  public void shouldReturnCurrentVersion() throws URISyntaxException {
-    when(reader.resolveSymbolicVersion(current)).thenReturn(v2);
-    when(reader.getCurrentVersion(base)).thenCallRealMethod();
-
-    assertEquals(2, reader.getCurrentVersion(base));
-  }
-
-  @Test
-  public void shoudlReturnNextVersion() throws URISyntaxException {
-    when(reader.resolveSymbolicVersion(next)).thenReturn(v3);
-    when(reader.getNextVersion(base)).thenCallRealMethod();
-
-    assertEquals(3, reader.getNextVersion(base));
-  }
-
-  @Before
-  public void setUp() throws URISyntaxException, IOException, RMapException {
-    reader = mock(RMapReader.class);
-    // URIs can not be created outside of the method.
-    current = RMapReader.getVersionedURI(base, RMapReader.CURRENT);
-    next = RMapReader.getVersionedURI(base, RMapReader.NEXT);
-    v2 = RMapReader.getVersionedURI(base, 2);
-    v3 = RMapReader.getVersionedURI(base, 3);
-  }
-}
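The deleted rmap tests above document the RMapReader versioning contract that the readers added below in hbase-server (HDFSReader, LocalReader) implement: an RMap URI may carry an explicit ?version=N or the symbolic CURRENT/NEXT, which a concrete reader resolves to a real version. A summary, condensed from the deleted TestRMapReader rather than from any new API:

    String base = "file:/some/rmap.json";
    URI v2 = RMapReader.getVersionedURI(base, 2);  // file:/some/rmap.json?version=2
    URI current = RMapReader.getVersionedURI(base, RMapReader.CURRENT);
    RMapReader.getVersion(v2);       // 2
    RMapReader.getVersion(current);  // RMapReader.CURRENT (symbolic)
    // reader.resolveSymbolicVersion(current) yields v2 once CURRENT points at version 2.
    RMapReader.getSignature("{}");   // MD5 as hex: "99914b932bd37a50b983c5e7c90ae93b"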
- assertEquals("DUMMYCLUSTER3", - peersWithCluster.get(new HServerAddress("10.159.9.49:60020"))); - - } - } - } - - @Test - public void testLocateRegionSingleRegion() throws IOException, JSONException { - byte[] row = new byte[1]; - for (int i = 0; i < 256; ++i) { - row[0] = (byte) i; - HRegionInfo region = locator.findRegion(Bytes.toBytes("wutang"), row); - assertNotNull(region); - assertEquals(5, region.getTableDesc().getColumnFamilies().length); - assertEquals("b2696f3faa4bd5767f2800bbcc2687c0", region.getEncodedName()); - assertEquals("", Bytes.toString(region.getStartKey())); - assertEquals("", Bytes.toString(region.getEndKey())); - assertEquals(1370994021138L, region.getRegionId()); - assertTrue(region.getFavoredNodes() == null || - region.getFavoredNodes().length == 0); - assertEquals(1, region.getPeersWithRank().size()); - assertEquals(new Integer(1), - region.getPeersWithRank().get(new HServerAddress("10.159.9.45:60020"))); - assertEquals(1, region.getPeers().size()); - assertEquals(1, region.getPeers().get("DUMMYCLUSTER1").size()); - assertEquals(new Integer(1), region.getPeers().get("DUMMYCLUSTER1") - .get(new HServerAddress("10.159.9.45:60020"))); - - } - } - - @Test - public void testLocateRegion() throws IOException, JSONException { - HRegionInfo region; - - // Test first region - region = locator.findRegion(Bytes.toBytes("loadtest"), - Bytes.toBytes("0")); - assertNotNull(region); - assertEquals(1, region.getTableDesc().getColumnFamilies().length); - assertEquals("", Bytes.toString(region.getStartKey())); - assertEquals("11111111", Bytes.toString(region.getEndKey())); - - // Test last region - region = locator.findRegion(Bytes.toBytes("loadtest"), - Bytes.toBytes("f")); - assertNotNull(region); - assertEquals(1, region.getTableDesc().getColumnFamilies().length); - assertEquals("eeeeeeee", Bytes.toString(region.getStartKey())); - assertEquals("", Bytes.toString(region.getEndKey())); - - // Test regions in the middle - region = locator.findRegion(Bytes.toBytes("loadtest"), - Bytes.toBytes("9")); - assertNotNull(region); - assertEquals(1, region.getTableDesc().getColumnFamilies().length); - assertEquals("88888888", Bytes.toString(region.getStartKey())); - assertEquals("99999999", Bytes.toString(region.getEndKey())); - - region = locator.findRegion(Bytes.toBytes("loadtest"), - Bytes.toBytes("9abcdefg")); - assertNotNull(region); - assertEquals(1, region.getTableDesc().getColumnFamilies().length); - assertEquals("99999999", Bytes.toString(region.getStartKey())); - assertEquals("aaaaaaaa", Bytes.toString(region.getEndKey())); - - // Test boundaries - region = locator.findRegion(Bytes.toBytes("loadtest"), - Bytes.toBytes("66666666")); - assertNotNull(region); - assertEquals(1, region.getTableDesc().getColumnFamilies().length); - assertEquals("66666666", Bytes.toString(region.getStartKey())); - assertEquals("77777777", Bytes.toString(region.getEndKey())); - - region = locator.findRegion(Bytes.toBytes("loadtest"), - Bytes.toBytes("cccccccc0")); - assertNotNull(region); - assertEquals(1, region.getTableDesc().getColumnFamilies().length); - assertEquals("cccccccc", Bytes.toString(region.getStartKey())); - assertEquals("dddddddd", Bytes.toString(region.getEndKey())); - - region = locator.findRegion(Bytes.toBytes("loadtest"), - Bytes.toBytes("2222222")); - assertNotNull(region); - assertEquals(1, region.getTableDesc().getColumnFamilies().length); - assertEquals("11111111", Bytes.toString(region.getStartKey())); - assertEquals("22222222", Bytes.toString(region.getEndKey())); - } - - @Test 
- public void shouldReturnAllRegionsGroupedByTable() { - List> regionsByTable = - locator.getAllRegionsGroupByTable(); - assertTrue(regionsByTable.size() > 0); - for (List regions : regionsByTable) { - assertTrue(regions.size() > 0); - } - } -} diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index 72aadb1..15d5f1c 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -333,6 +333,11 @@ org.apache.hbase + hbase-consensus + ${project.version} + + + org.apache.hbase hbase-hadoop-compat @@ -443,6 +448,11 @@ io.netty netty-all + + org.json + json + 20090211 + org.htrace diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/HDFSReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/HDFSReader.java new file mode 100644 index 0000000..7d6b0f7 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/HDFSReader.java @@ -0,0 +1,140 @@ +package org.apache.hadoop.hbase.consensus.rmap; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.util.Bytes; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class HDFSReader extends RMapReader { + protected static final Logger LOG = LoggerFactory.getLogger(HDFSReader.class); + + private Configuration conf; + + public HDFSReader(final Configuration conf) { + this.conf = conf; + } + + @Override + public List getVersions(URI uri) throws IOException { + Path path = new Path(getSchemeAndPath(uri)); + FileSystem fs = path.getFileSystem(conf); + FileStatus[] statuses = fs.globStatus(new Path(path.toString() + ".*")); + + List versions = new ArrayList<>(statuses.length); + for (FileStatus status : statuses) { + long version = getVersionFromPath(status.getPath().toString()); + if (version > 0) { + versions.add(version); + } + } + Collections.sort(versions); + return versions; + } + + @Override + public URI resolveSymbolicVersion(URI uri) throws URISyntaxException { + long version = getVersion(uri); + String schemeAndPath = getSchemeAndPath(uri); + + if (version == RMapReader.CURRENT || version == RMapReader.NEXT) { + Path link = new Path(String.format("%s.%s", schemeAndPath, + version == RMapReader.CURRENT ? "CURRENT" : "NEXT")); + // Resolve to an explicit version, or UNKNOWN + try { + Path target = getLinkTarget(link); + version = target != null ? getVersionFromPath(target.toString()) : + RMapReader.UNKNOWN; + } catch (IOException e) { + LOG.error("Failed to look up version from link:", e); + version = RMapReader.UNKNOWN; + } + } + + if (version > 0) { + return new URI(String.format("%s?version=%d", schemeAndPath, version)); + } + return new URI(schemeAndPath); + } + + @Override + public String readRMapAsString(final URI uri) throws IOException { + // Get file status, throws IOException if the path does not exist. 
+    Path path = getPathWithVersion(uri);
+    FileSystem fs = path.getFileSystem(conf);
+    FileStatus status = fs.getFileStatus(path);
+
+    long n = status.getLen();
+    if (n < 0 || n > MAX_SIZE_BYTES) {
+      throw new IOException(String.format("Invalid RMap file size " +
+          "(expected between 0 and %d but got %d bytes)",
+          MAX_SIZE_BYTES, n));
+    }
+
+    byte[] buf = new byte[(int) n];
+    FSDataInputStream stream = fs.open(path);
+    try {
+      stream.readFully(buf);
+    } finally {
+      stream.close();
+    }
+    return Bytes.toString(buf);
+  }
+
+  public Path getPathWithVersion(final URI uri) throws IOException {
+    long version = RMapReader.UNKNOWN;
+    try {
+      version = getVersion(resolveSymbolicVersion(uri));
+    } catch (URISyntaxException e) {
+      // Ignore invalid URIs and assume version UNKNOWN
+    }
+
+    if (version > 0) {
+      return new Path(String.format("%s.%d", getSchemeAndPath(uri), version));
+    }
+    return new Path(uri.toString());
+  }
+
+  private long getVersionFromPath(final String path) {
+    String[] tokens = path.split("[\\.]");
+    try {
+      return Long.parseLong(tokens[tokens.length - 1]);
+    } catch (NumberFormatException e) {
+      // Skip if the token is not numerical
+    }
+    return RMapReader.UNKNOWN;
+  }
+
+  private Path getLinkTarget(final Path path) throws IOException {
+    FileSystem fs = path.getFileSystem(conf);
+
+    // The getHardLinkedFiles call is a bit tricky, as it effectively returns
+    // all other paths to the inode shared with the given path. In order to
+    // guard against erroneous links, only consider those where the paths
+    // are the same, up to the version.
+    String pathWithoutVersion = path.toString().substring(0,
+        path.toString().lastIndexOf('.'));
+    /*
+    TODO: FIXME: Amit: this code works with the internal HDFS; it might not
+    work with the OSS version.
+
+    for (String link : fs.getHardLinkedFiles(path)) {
+      if (link.startsWith(pathWithoutVersion) &&
+          getVersionFromPath(link) > 0) {
+        return new Path(link);
+      }
+    }
+    */
+    return null;
+  }
+}
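
[Editor's note, not part of the patch: the intended call pattern for a reader
is to resolve a symbolic version first and then fetch the concrete file. A
minimal sketch follows; the namenode path is hypothetical. Note that until the
getHardLinkedFiles TODO above is resolved, HDFSReader resolves CURRENT/NEXT to
UNKNOWN and falls back to the unversioned path.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.consensus.rmap.HDFSReader;

    public class HDFSReaderSketch {
      public static void main(String[] args) throws Exception {
        HDFSReader reader = new HDFSReader(new Configuration());
        // e.g. rmap.json?version=CURRENT resolves to rmap.json?version=42
        URI current = reader.resolveSymbolicVersion(
            new URI("hdfs://nn/hbase/rmap.json?version=CURRENT"));
        // Reads the concrete file, e.g. rmap.json.42
        String json = reader.readRMapAsString(current);
        System.out.println(json.length() + " bytes of RMap JSON");
      }
    }
]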
"CURRENT" : "NEXT")); + // Resolve to an explicit version, or UNKNOWN + try { + version = getVersionFromPath(Files.readSymbolicLink(link).toString()); + } catch (IOException e) { + LOG.error("Failed to look up version from link:", e); + version = RMapReader.UNKNOWN; + } + } + + if (version > 0) { + return new URI(String.format("%s?version=%d", schemeAndPath, version)); + } + return new URI(schemeAndPath); + } + + @Override + public String readRMapAsString(final URI uri) throws IOException { + Path path = getPathWithVersion(uri); + + long n = Files.size(path); + if (n < 0 || n > MAX_SIZE_BYTES) { + throw new IOException(String.format("Invalid RMap file size " + + "(expected between 0 and %d but got %d bytes)", + MAX_SIZE_BYTES, n)); + } + + return new String(Files.readAllBytes(path)); + } + + private long getVersionFromPath(final String path) { + String[] tokens = path.split("[\\.]"); + try { + return Long.parseLong(tokens[tokens.length - 1]); + } catch (NumberFormatException e) { + // Skip if token not numerical + } + return RMapReader.UNKNOWN; + } + + private Path getPathWithVersion(final URI uri) { + long version = RMapReader.UNKNOWN; + try { + version = getVersion(resolveSymbolicVersion(uri)); + } catch (URISyntaxException e) { + // Ignore invalid URIs and assume version UNKNOWN + } + + if (version > 0) { + return Paths.get(String.format("%s.%d", uri.getPath(), version)); + } + return Paths.get(uri); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/NoSuchRMapException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/NoSuchRMapException.java new file mode 100644 index 0000000..6136063 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/NoSuchRMapException.java @@ -0,0 +1,10 @@ +package org.apache.hadoop.hbase.consensus.rmap; + +import java.io.IOException; +import java.net.URI; + +public class NoSuchRMapException extends IOException { + public NoSuchRMapException(final URI uri) { + super("No RMap found with URI " + uri); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java new file mode 100644 index 0000000..8f09dd9 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java @@ -0,0 +1,153 @@ +package org.apache.hadoop.hbase.consensus.rmap; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HServerAddress; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.util.Bytes; +import org.json.JSONArray; +import org.json.JSONException; +import org.json.JSONObject; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +public class Parser { + private Configuration conf; + + public Parser(final Configuration conf) { + this.conf = conf; + } + + public List parseEncodedRMap(JSONObject encodedRMap) + throws JSONException { + List regions = new ArrayList<>(); + JSONObject tables = encodedRMap.getJSONObject("tables"); + + for (Iterator names = tables.keys(); names.hasNext();) { + String name = names.next(); + 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java
new file mode 100644
index 0000000..8f09dd9
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/Parser.java
@@ -0,0 +1,153 @@
+package org.apache.hadoop.hbase.consensus.rmap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+public class Parser {
+  private Configuration conf;
+
+  public Parser(final Configuration conf) {
+    this.conf = conf;
+  }
+
+  public List<HRegionInfo> parseEncodedRMap(JSONObject encodedRMap)
+      throws JSONException {
+    List<HRegionInfo> regions = new ArrayList<>();
+    JSONObject tables = encodedRMap.getJSONObject("tables");
+
+    for (Iterator<String> names = tables.keys(); names.hasNext();) {
+      String name = names.next();
+      regions.addAll(parseTable(name, tables.getJSONObject(name)));
+    }
+
+    return regions;
+  }
+
+  public List<HRegionInfo> parseTable(String name, JSONObject table)
+      throws JSONException {
+    HTableDescriptor tableDesc = new HTableDescriptor(name);
+    List<HRegionInfo> regions = Collections.emptyList();
+    Iterator<String> keys = table.keys();
+    while (keys.hasNext()) {
+      String key = keys.next();
+      if (key.equals("families")) {
+        JSONObject families = table.getJSONObject(key);
+        Iterator<String> familyKeys = families.keys();
+        while (familyKeys.hasNext()) {
+          String familyName = familyKeys.next();
+          JSONObject familyJson = families.getJSONObject(familyName);
+          tableDesc.addFamily(parseFamily(familyName, familyJson));
+        }
+      } else if (key.equals("regions")) {
+        JSONArray regionsJson = table.getJSONArray(key);
+        int length = regionsJson.length();
+        regions = new ArrayList<>(length);
+        for (int i = 0; i < length; ++i) {
+          regions.add(parseRegion(tableDesc, regionsJson.getJSONObject(i)));
+        }
+      } else {
+        String value = table.get(key).toString();
+        tableDesc.setValue(key, value);
+      }
+    }
+    return regions;
+  }
+
+  public HColumnDescriptor parseFamily(String name, JSONObject family)
+      throws JSONException {
+    HColumnDescriptor columnDesc = new HColumnDescriptor(name);
+    Iterator<String> keys = family.keys();
+    while (keys.hasNext()) {
+      String key = keys.next();
+      String value = family.get(key).toString();
+      columnDesc.setValue(key, value);
+    }
+    return columnDesc;
+  }
+
+  public HRegionInfo parseRegion(HTableDescriptor table, JSONObject region)
+      throws JSONException {
+    long id = region.getLong("id");
+    byte[] startKey = Bytes.toBytes(region.getString("start_key"));
+    byte[] endKey = Bytes.toBytes(region.getString("end_key"));
+    Map<String, Map<ServerName, Integer>> peers = parsePeers(region
+        .getJSONObject("peers"));
+    Map<String, InetSocketAddress[]> favoredNodesMap = parseFavoredNodesMap(
+        region.getJSONObject("favored_nodes"));
+    return new HRegionInfo(table.getTableName(), startKey, endKey, false, id,
+        peers, favoredNodesMap);
+  }
+
+  public Map<String, Map<ServerName, Integer>> parsePeers(JSONObject peersJson)
+      throws JSONException {
+    Map<String, Map<ServerName, Integer>> peers = new LinkedHashMap<>();
+    Iterator<String> keys = peersJson.keys();
+    while (keys.hasNext()) {
+      String cellName = keys.next();
+      JSONArray peersWithRank = peersJson.getJSONArray(cellName);
+      peers.put(cellName, parsePeersWithRank(peersWithRank));
+    }
+    return peers;
+  }
+
+  public Map<ServerName, Integer> parsePeersWithRank(JSONArray peersJson)
+      throws JSONException {
+    Map<ServerName, Integer> peers = new LinkedHashMap<>();
+    for (int i = 0; i < peersJson.length(); ++i) {
+      String peer = peersJson.getString(i);
+      int colonIndex = peer.lastIndexOf(':');
+      peers.put(ServerName.valueOf(peer.substring(0, colonIndex)),
+          Integer.valueOf(peer.substring(colonIndex + 1)));
+    }
+    return peers;
+  }
+
+  Map<String, InetSocketAddress[]> parseFavoredNodesMap(
+      JSONObject favoredNodesJson) throws JSONException {
+    Iterator<String> keys = favoredNodesJson.keys();
+
+    HashMap<String, InetSocketAddress[]> favoredNodesMap = new HashMap<>();
+    while (keys.hasNext()) {
+      String cellName = keys.next();
+      JSONArray favoredNodes = favoredNodesJson.getJSONArray(cellName);
+      favoredNodesMap.put(cellName, parseFavoredNodes(favoredNodes));
+    }
+    return favoredNodesMap;
+  }
+
+  public InetSocketAddress[] parseFavoredNodes(JSONArray favoredNodesInCell)
+      throws JSONException {
+    if (favoredNodesInCell == null) {
+      return null;
+    } else {
+      int length = favoredNodesInCell.length();
+      InetSocketAddress[] favoredNodes = new InetSocketAddress[length];
+      for (int i = 0; i < length; ++i) {
+        String node = favoredNodesInCell.getString(i);
+        int colonIndex = node.lastIndexOf(':');
+        favoredNodes[i] = new InetSocketAddress(node.substring(0, colonIndex),
+            Integer.parseInt(node.substring(colonIndex + 1)));
+      }
+      return favoredNodes;
+    }
+  }
+}
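
[Editor's note, not part of the patch: the JSON shape the parser expects,
inferred from parseRegion()/parsePeers() above. All table names, hosts, and
ids are made-up examples; peers are "host,port,startcode:rank" strings and
favored nodes are "host:port" strings. The sketch relies on the HRegionInfo
constructor this patch adds.

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.consensus.rmap.Parser;
    import org.json.JSONObject;

    public class ParserSketch {
      public static void main(String[] args) throws Exception {
        String json =
            "{ \"tables\": { \"t1\": {" +
            "    \"families\": { \"cf\": { \"BLOCKSIZE\": \"65536\" } }," +
            "    \"regions\": [ {" +
            "      \"id\": 1," +
            "      \"start_key\": \"\"," +
            "      \"end_key\": \"11111111\"," +
            "      \"peers\": { \"CELL1\": [ \"host1,60020,1:1\" ] }," +
            "      \"favored_nodes\": { \"CELL1\": [ \"host1:60020\" ] } } ] } } }";
        List<HRegionInfo> regions =
            new Parser(new Configuration()).parseEncodedRMap(new JSONObject(json));
        System.out.println(regions.size());  // 1
      }
    }
]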
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapConfiguration.java
new file mode 100644
index 0000000..df6a43c
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapConfiguration.java
@@ -0,0 +1,270 @@
+package org.apache.hadoop.hbase.consensus.rmap;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.json.JSONException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.text.SimpleDateFormat;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+public class RMapConfiguration {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RMapConfiguration.class);
+
+  private Configuration conf;
+
+  private Map<String, RMap> appliedRMaps;
+  private Map<URI, RMap> cachedRMaps;
+
+  public RMapConfiguration(final Configuration conf) {
+    this.conf = conf;
+    this.appliedRMaps = new HashMap<>();
+    this.cachedRMaps = new HashMap<>();
+  }
+
+  public static URI getRMapSubscription(final Configuration conf) {
+    String[] subscriptionsList =
+        conf.get(HConstants.RMAP_SUBSCRIPTION, "").split(",");
+    if (subscriptionsList.length >= 1) {
+      if (subscriptionsList.length > 1) {
+        LOG.warn(String.format("We do not support multiple RMaps. " +
+            "Using the first RMap as the correct one: %s",
+            subscriptionsList[0]));
+      }
+      if (!subscriptionsList[0].equals("")) {
+        try {
+          return new URI(subscriptionsList[0]);
+        } catch (URISyntaxException e) {
+          LOG.warn(String.format("Failed to parse URI for subscription %s: ",
+              subscriptionsList[0]), e);
+        }
+      }
+    }
+    return null;
+  }
+
+  public static RMapReader getRMapReader(final Configuration conf,
+      final URI uri) throws RMapException {
+    switch (uri.getScheme()) {
+      case "file":
+        return new LocalReader();
+      case "hdfs":
+        return new HDFSReader(conf);
+      default:
+        throw new RMapException("No reader found for RMap: " + uri);
+    }
+  }
+
+  public synchronized RMap getRMap(URI uri)
+      throws IOException, RMapException {
+    return getRMap(uri, false);
+  }
+
+  public synchronized RMap getRMap(URI uri, boolean reload)
+      throws IOException, RMapException {
+    try {
+      RMapReader reader = getRMapReader(conf, uri);
+      URI nonSymbolicURI = reader.resolveSymbolicVersion(uri);
+      // Try to get a cached instance of the RMap.
+      RMap rmap = cachedRMaps.get(nonSymbolicURI);
+      if (reload || rmap == null) {
+        // No cached instance was found, read it using the reader.
+        RMapJSON encodedRMap = reader.readRMap(nonSymbolicURI);
+        rmap = new RMap(encodedRMap.uri,
+            new Parser(conf).parseEncodedRMap(encodedRMap.getEncodedRMap()),
+            encodedRMap.signature);
+        cachedRMaps.put(rmap.uri, rmap);
+      }
+      return rmap;
+    } catch (URISyntaxException e) {
+      throw new RMapException("URI syntax invalid for RMap: " + uri, e);
+    } catch (JSONException e) {
+      throw new RMapException("Failed to decode JSON for RMap: " + uri, e);
+    }
+  }
+
+  /**
+   * Reads and caches the RMap from the given URI and returns its signature.
+   *
+   * @param uri URI of the RMap
+   * @return the signature of the RMap
+   */
+  public synchronized String readRMap(final URI uri) throws IOException,
+      RMapException {
+    return getRMap(uri).signature;
+  }
+
+  public synchronized String readRMap(URI uri, boolean reload)
+      throws IOException, RMapException {
+    return getRMap(uri, reload).signature;
+  }
+
+  /**
+   * Get the list of regions which need to be updated in order to transition
+   * to the version of the RMap given by the URI.
+   *
+   * @param uri of the RMap
+   * @return a list of regions
+   */
+  public synchronized Collection<HRegionInfo> getTransitionDelta(final URI uri)
+      throws IOException, RMapException {
+    RMap nextRMap = getRMap(uri);
+    RMap currentRMap = appliedRMaps.get(RMapReader.getSchemeAndPath(uri));
+
+    // The standard Set implementations seem to use compareTo() for their
+    // operations. On HRegionInfo, compareTo() and equals() have different
+    // semantics, and equals() is what is needed here. What follows is a
+    // poor man's Set comparison to determine which regions need to be
+    // modified to make the RMap transition.
+    if (nextRMap != null) {
+      HashMap<String, HRegionInfo> delta = new HashMap<>();
+      for (HRegionInfo next : nextRMap.regions) {
+        delta.put(next.getEncodedName(), next);
+      }
+
+      if (currentRMap != null) {
+        // Remove all regions already present in the current RMap from the
+        // delta. This should use the {@link HRegionInfo#equals} method as it
+        // should consider the favored nodes and replicas.
+        for (HRegionInfo current : currentRMap.regions) {
+          HRegionInfo next = delta.get(current.getEncodedName());
+          if (next != null && next.equals(current)) {
+            delta.remove(next.getEncodedName());
+          }
+        }
+      }
+
+      return delta.values();
+    }
+
+    return Collections.emptyList();
+  }
+
+  public synchronized void appliedRMap(final URI uri) throws IOException,
+      RMapException {
+    RMap previous = appliedRMaps.put(RMapReader.getSchemeAndPath(uri),
+        getRMap(uri));
+    // Purge the earlier version of the RMap from the cache.
+    if (previous != null) {
+      cachedRMaps.remove(previous.uri);
+    }
+  }
+
+  public synchronized boolean isRMapApplied(final URI uri) {
+    RMap active = appliedRMaps.get(RMapReader.getSchemeAndPath(uri));
+    if (active != null) {
+      return active.uri.equals(uri);
+    }
+    return false;
+  }
+
+  public synchronized RMap getAppliedRMap(String uri) {
+    return appliedRMaps.get(uri);
+  }
+
+  public synchronized List<HRegionInfo> getRegions(final URI uri)
+      throws IOException, RMapException {
+    RMap rmap = getRMap(uri);
+    if (rmap == null) {
+      return Collections.emptyList();
+    }
+    return Collections.unmodifiableList(rmap.regions);
+  }
+
+  public synchronized void clearFromRMapCache(URI uri) {
+    cachedRMaps.remove(uri);
+  }
+
+  /**
+   * Replace the content of the cached RMap. For testing only!
+   *
+   * @param uri URI of the RMap to replace
+   * @param rMap the replacement RMap
+   */
+  public synchronized void cacheCustomRMap(URI uri, RMap rMap) {
+    cachedRMaps.put(uri, rMap);
+    appliedRMaps.put(uri.toString(), rMap);
+  }
+
+  public class RMap {
+    public final URI uri;
+    public final List<HRegionInfo> regions;
+    public final String signature;
+
+    RMap(final URI uri, final List<HRegionInfo> regions,
+        final String signature) {
+      this.uri = uri;
+      this.regions = regions;
+      this.signature = signature;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (!(obj instanceof RMap)) {
+        return false;
+      }
+      RMap that = (RMap) obj;
+      if (this.regions == null || that.regions == null ||
+          this.regions.size() != that.regions.size()) {
+        return false;
+      }
+      Set<HRegionInfo> regionInfos = new TreeSet<>();
+      regionInfos.addAll(regions);
+      for (HRegionInfo region : that.regions) {
+        if (!regionInfos.contains(region)) {
+          return false;
+        }
+        regionInfos.remove(region);
+      }
+      return regionInfos.isEmpty();
+    }
+  }
+
+  /**
+   * Creates a temporary name for an RMap, based on the date and time.
+   *
+   * @return a name of the form rmap.json.yyyy-MM-dd-HHmmss
+   */
+  public static String createRMapName() {
+    SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd-HHmmss");
+    return "rmap.json." + format.format(System.currentTimeMillis());
+  }
+
+  /**
+   * View information about an RMap. Currently only prints its signature.
+   *
+   * @param args command line arguments; use -r to name the RMap
+   */
+  public static void main(String[] args) throws ParseException,
+      URISyntaxException, RMapException, IOException {
+    Options options = new Options();
+    options.addOption("r", "rmap", true, "Name of the rmap");
+
+    CommandLineParser parser = new PosixParser();
+    CommandLine cmd = parser.parse(options, args);
+
+    if (!cmd.hasOption("r")) {
+      System.out.println("Please specify the rmap with -r");
+      return;
+    }
+
+    String rmapUriStr = cmd.getOptionValue("r");
+    RMapConfiguration conf = new RMapConfiguration(new Configuration());
+    String rmapStr = conf.readRMap(new URI(rmapUriStr));
+    LOG.debug("RMap Signature: " + rmapStr);
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapException.java
new file mode 100644
index 0000000..31621ab
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapException.java
@@ -0,0 +1,11 @@
+package org.apache.hadoop.hbase.consensus.rmap;
+
+public class RMapException extends Exception {
+  public RMapException(final String message) {
+    super(message);
+  }
+
+  public RMapException(final String message, final Throwable cause) {
+    super(message, cause);
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapJSON.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapJSON.java
new file mode 100644
index 0000000..6d06123
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapJSON.java
@@ -0,0 +1,34 @@
+package org.apache.hadoop.hbase.consensus.rmap;
+
+import org.json.JSONObject;
+
+import java.net.URI;
+
+public class RMapJSON {
+  final URI uri;
+  final JSONObject rmap;
+  final String signature;
+
+  public RMapJSON(final URI uri, final JSONObject rmap,
+      final String signature) {
+    this.uri = uri;
+    this.rmap = rmap;
+    this.signature = signature;
+  }
+
+  public long getVersion() {
+    return RMapReader.getVersion(uri);
+  }
+
+  public URI getURI() {
+    return uri;
+  }
+
+  public JSONObject getEncodedRMap() {
+    return rmap;
+  }
+
+  public String getSignature() {
+    return signature;
+  }
+}
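
[Editor's note, not part of the patch: the intended lifecycle around a
membership change, as suggested by getTransitionDelta()/appliedRMap() above.
The subscription URI is hypothetical.

    import java.net.URI;
    import java.util.Collection;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.consensus.rmap.RMapConfiguration;

    public class RMapTransitionSketch {
      public static void main(String[] args) throws Exception {
        RMapConfiguration rmapConf = new RMapConfiguration(new Configuration());
        URI next = new URI("file:/hbase/rmap.json?version=2");  // hypothetical
        // Regions whose quorum membership or layout changed relative to the
        // currently applied RMap:
        Collection<HRegionInfo> delta = rmapConf.getTransitionDelta(next);
        // ... reconfigure the regions in the delta, then mark the RMap applied:
        rmapConf.appliedRMap(next);
        System.out.println(rmapConf.isRMapApplied(next));  // true
      }
    }
]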
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapReader.java
new file mode 100644
index 0000000..dc81d34
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/consensus/rmap/RMapReader.java
@@ -0,0 +1,205 @@
+package org.apache.hadoop.hbase.consensus.rmap;
+
+import org.apache.commons.codec.binary.Hex;
+import org.apache.http.NameValuePair;
+import org.apache.http.client.utils.URLEncodedUtils;
+import org.json.JSONException;
+import org.json.JSONObject;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.List;
+
+public abstract class RMapReader {
+  /** Max file size of a single file containing an RMap */
+  public static final long MAX_SIZE_BYTES = 16 * 1024 * 1024; // 16 MB
+
+  /** RMap version special values */
+  public static final long NEXT = -2;
+  public static final long CURRENT = -1;
+  public static final long UNKNOWN = 0;
+
+  /**
+   * Return a naturally sorted list of available versions of a given RMap URI.
+   *
+   * @param uri URI of the RMap
+   * @return a naturally sorted list of versions of the given RMap URI
+   * @throws IOException if an exception occurs while reading versions
+   */
+  public abstract List<Long> getVersions(final URI uri) throws IOException;
+
+  /**
+   * Resolve a URI containing a symbolic version into a URI with an absolute
+   * version which can be requested from the reader.
+   *
+   * @param uri URI containing a symbolic version
+   * @return a URI containing an absolute version
+   * @throws URISyntaxException if the given URI is malformed
+   */
+  public abstract URI resolveSymbolicVersion(final URI uri)
+      throws URISyntaxException;
+
+  /**
+   * Return the contents of the RMap at the given URI as a string.
+   *
+   * @param uri URI of the RMap
+   * @return contents of the RMap as a String
+   * @throws IOException if an exception occurs while reading the RMap
+   */
+  public abstract String readRMapAsString(final URI uri) throws IOException;
+
+  /**
+   * Return the version number of the RMap specified in the given URI.
+   *
+   * @param uri URI of the RMap
+   * @return the version number of the RMap, or UNKNOWN (0) if no version was
+   *         found
+   */
+  public static long getVersion(final URI uri) {
+    for (NameValuePair param : URLEncodedUtils.parse(uri, "UTF-8")) {
+      if (param.getName().equals("version")) {
+        switch (param.getValue().toUpperCase()) {
+          case "NEXT":
+            return NEXT;
+          case "CURRENT":
+            return CURRENT;
+          default:
+            try {
+              return Long.parseLong(param.getValue());
+            } catch (NumberFormatException e) {
+              /* Ignore if NaN */
+            }
+        }
+      }
+    }
+    return UNKNOWN;
+  }
+
+  public static boolean isSymbolicVersion(final URI uri) {
+    return getVersion(uri) < 0;
+  }
+
+  /**
+   * Read and return a {@link RMapJSON} of the RMap at the given URI.
+   *
+   * @param uri URI of the RMap
+   * @return a JSON representation of the RMap
+   * @throws IOException if a (possibly transient) exception occurs while
+   *         reading the RMap
+   * @throws RMapException if any other exception occurs while reading the
+   *         RMap
+   */
+  public RMapJSON readRMap(final URI uri) throws IOException, RMapException {
+    URI nonSymbolicURI;
+    try {
+      nonSymbolicURI = resolveSymbolicVersion(uri);
+      String encodedRMap = readRMapAsString(nonSymbolicURI);
+      return new RMapJSON(nonSymbolicURI, new JSONObject(encodedRMap),
+          getSignature(encodedRMap));
+    } catch (URISyntaxException e) {
+      throw new RMapException("URI syntax invalid for RMap: " + uri, e);
+    } catch (JSONException e) {
+      throw new RMapException(
+          "Failed to decode JSON string for RMap: " + uri, e);
+    } catch (NoSuchAlgorithmException e) {
+      throw new RMapException(
+          "Failed to generate signature for RMap: " + uri, e);
+    }
+  }
+
+  /**
+   * Get an MD5 hash of the given string.
+   *
+   * @param s string to be hashed
+   * @return a hex String representation of the hash
+   * @throws NoSuchAlgorithmException if the MD5 message digest is unavailable
+   */
+  public static String getSignature(final String s)
+      throws NoSuchAlgorithmException {
+    return new String(Hex.encodeHex(
+        MessageDigest.getInstance("MD5").digest(s.getBytes())));
+  }
+
+  /**
+   * Get an MD5 signature of the RMap at the given URI.
+   *
+   * @param uri URI of the RMap
+   * @return a hex String representation of the hash
+   * @throws IOException if an exception occurs while reading the RMap
+   * @throws RMapException if the signature could not be generated
+   */
+  public String getSignature(final URI uri) throws IOException, RMapException {
+    URI nonSymbolicURI;
+    try {
+      nonSymbolicURI = resolveSymbolicVersion(uri);
+      String encodedRMap = readRMapAsString(nonSymbolicURI);
+      return getSignature(encodedRMap);
+    } catch (URISyntaxException e) {
+      throw new RMapException("URI syntax invalid for RMap: " + uri, e);
+    } catch (NoSuchAlgorithmException e) {
+      throw new RMapException(
+          "Failed to generate signature for RMap: " + uri, e);
+    }
+  }
+
+  /**
+   * Get the scheme, authority (if present) and path of a given URI as a
+   * string.
+   *
+   * @param uri URI to strip
+   * @return a string containing just the scheme, authority and path
+   */
+  public static String getSchemeAndPath(final URI uri) {
+    return String.format("%s:%s%s", uri.getScheme(),
+        uri.getAuthority() != null ?
+            String.format("//%s", uri.getAuthority()) : "",
+        uri.getPath());
+  }
+
+  /**
+   * Get a versioned URI for the RMap with the given scheme, path and version.
+   *
+   * @param schemeAndPath scheme, authority and path of the RMap
+   * @param version version of the RMap
+   * @return a URI of the form [scheme]://[authority][path]?version=[version]
+   * @throws URISyntaxException if the resulting URI is malformed
+   */
+  public static URI getVersionedURI(final String schemeAndPath,
+      final long version) throws URISyntaxException {
+    String token = "UNKNOWN";
+
+    if (version > 0) {
+      token = String.format("%d", version);
+    } else if (version == CURRENT) {
+      token = "CURRENT";
+    } else if (version == NEXT) {
+      token = "NEXT";
+    }
+
+    return new URI(String.format("%s?version=%s", schemeAndPath, token));
+  }
+
+  /**
+   * Get a versioned URI for the RMap with the given base URI and version. If
+   * the given URI already contains a version it is overwritten by the given
+   * version.
+   *
+   * @param uri base URI of the RMap
+   * @param version version of the RMap
+   * @return a URI of the form [scheme]://[authority][path]?version=[version]
+   * @throws URISyntaxException if the resulting URI is malformed
+   */
+  public static URI getVersionedURI(final URI uri, final long version)
+      throws URISyntaxException {
+    return getVersionedURI(getSchemeAndPath(uri), version);
+  }
+
+  public long getCurrentVersion(final String schemeAndPath)
+      throws URISyntaxException {
+    return getVersion(resolveSymbolicVersion(
+        getVersionedURI(schemeAndPath, CURRENT)));
+  }
+
+  public long getNextVersion(final String schemeAndPath)
+      throws URISyntaxException {
+    return getVersion(resolveSymbolicVersion(
+        getVersionedURI(schemeAndPath, NEXT)));
+  }
+}
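
[Editor's note, not part of the patch: the versioned-URI convention above in a
round trip, using only the static helpers defined in RMapReader.

    import java.net.URI;
    import org.apache.hadoop.hbase.consensus.rmap.RMapReader;

    public class VersionedURISketch {
      public static void main(String[] args) throws Exception {
        URI current = RMapReader.getVersionedURI("file:/some/rmap.json",
            RMapReader.CURRENT);
        // file:/some/rmap.json?version=CURRENT
        System.out.println(RMapReader.isSymbolicVersion(current));  // true

        URI v2 = RMapReader.getVersionedURI(current, 2);
        // file:/some/rmap.json?version=2
        System.out.println(RMapReader.getVersion(v2));        // 2
        System.out.println(RMapReader.getSchemeAndPath(v2));  // file:/some/rmap.json
      }
    }
]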
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestParser.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestParser.java
new file mode 100644
index 0000000..e8c40de
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestParser.java
@@ -0,0 +1,97 @@
+package org.apache.hadoop.hbase.consensus.rmap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.json.JSONException;
+import org.json.JSONObject;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+public class TestParser {
+  Configuration conf;
+  Parser parser;
+  JSONObject rmapAsJSON;
+
+  @Before
+  public void setUp() throws IOException, JSONException {
+    conf = HBaseConfiguration.create();
+    conf.set(HConstants.HYDRABASE_DCNAME, "DUMMYCLUSTER1");
+
+    parser = new Parser(conf);
+    rmapAsJSON = new JSONObject(new String(Files.readAllBytes(
+        Paths.get(getClass().getResource("rmap.json").getPath()))));
+  }
+
+  @Test
+  public void testParseRMap() throws IOException, JSONException {
+    List<HRegionInfo> regions = parser.parseTable("RPCBenchmarkingTable",
+        getTableObjectFromJSON("RPCBenchmarkingTable"));
+    assertEquals(3, regions.size());
+    HRegionInfo region = regions.get(0);
+    // TODO @gauravm
+    // Fix this.
+    /*
+    HTableDescriptor table = region.getTableDesc();
+    assertEquals("RPCBenchmarkingTable", table.getNameAsString());
+    assertFalse(table.isMetaTable());
+    assertFalse(table.isRootRegion());
+    HColumnDescriptor[] columnFamilies = table.getColumnFamilies();
+    assertEquals(1, columnFamilies.length);
+    HColumnDescriptor cf0 = columnFamilies[0];
+    assertEquals("cf", cf0.getNameAsString());
+    assertEquals("true", cf0.getValue("BLOCKCACHE"));
+    assertEquals("65536", cf0.getValue("BLOCKSIZE"));
+    assertEquals("NONE", cf0.getValue("BLOOMFILTER"));
+    assertEquals("0.01", cf0.getValue("BLOOMFILTER_ERRORRATE"));
+    assertEquals("NONE", cf0.getValue("COMPRESSION"));
+    assertEquals("NONE", cf0.getValue("DATA_BLOCK_ENCODING"));
+    assertEquals("true", cf0.getValue("ENCODE_ON_DISK"));
+    assertEquals("false", cf0.getValue("IN_MEMORY"));
+    assertEquals("0", cf0.getValue("REPLICATION_SCOPE"));
+    assertEquals("2147483647", cf0.getValue("TTL"));
+    assertEquals("2147483647", cf0.getValue("VERSIONS"));
+
+    assertEquals("aeeb54dc6fbca609443bd35796b59da5", region.getEncodedName());
+    assertEquals("", Bytes.toString(region.getStartKey()));
+    assertEquals("2aaaaaaa", Bytes.toString(region.getEndKey()));
+    assertEquals(1373324048180L, region.getRegionId());
+
+    InetSocketAddress[] favoredNodes =
+        region.getFavoredNodesMap().get("DUMMYCLUSTER1");
+    assertEquals(3, favoredNodes.length);
+    assertEquals(new InetSocketAddress("10.159.9.49", 60020), favoredNodes[0]);
+    assertEquals(new InetSocketAddress("10.159.9.45", 60020), favoredNodes[1]);
+    assertEquals(new InetSocketAddress("10.159.9.47", 60020), favoredNodes[2]);
+
+    Map<String, Map<HServerAddress, Integer>> peers = region.getPeers();
+    assertEquals(1, peers.size());
+    Map<HServerAddress, Integer> peersWithRank = region.getPeersWithRank();
+    assertEquals(3, peersWithRank.size());
+    assertEquals(new Integer(1),
+        peersWithRank.get(new HServerAddress("10.159.9.41:60020")));
+    assertEquals(new Integer(2),
+        peersWithRank.get(new HServerAddress("10.159.9.45:60020")));
+    assertEquals(new Integer(3),
+        peersWithRank.get(new HServerAddress("10.159.9.47:60020")));
+    assertEquals(peers.get("DUMMYCLUSTER1"), peersWithRank);
+
+    assertEquals(null, peersWithRank.get(new HServerAddress("1.1.1.1:11111")));
+    */
+  }
+
+  private JSONObject getTableObjectFromJSON(final String name)
+      throws JSONException {
+    return rmapAsJSON.getJSONObject("tables").getJSONObject(name);
+  }
+}
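
[Editor's note, not part of the patch: how an RMap subscription is wired
through configuration, following the pattern in the test below; the path is
hypothetical.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.consensus.rmap.RMapConfiguration;

    public class RMapSubscriptionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.RMAP_SUBSCRIPTION, "file:/hbase/rmap.json"); // hypothetical
        conf.set(HConstants.HYDRABASE_DCNAME, "DC1");

        URI uri = RMapConfiguration.getRMapSubscription(conf);
        RMapConfiguration rmapConf = new RMapConfiguration(conf);
        rmapConf.readRMap(uri);    // read and cache; returns the signature
        rmapConf.appliedRMap(uri); // mark it as the applied RMap
      }
    }
]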
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapConfiguration.java
new file mode 100644
index 0000000..4c22dc1
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapConfiguration.java
@@ -0,0 +1,55 @@
+package org.apache.hadoop.hbase.consensus.rmap;
+
+import static org.junit.Assert.*;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestRMapConfiguration {
+  private Configuration conf;
+  private RMapConfiguration rmapConf;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = HBaseConfiguration.create();
+    conf.set(HConstants.RMAP_SUBSCRIPTION,
+        getClass().getResource("rmap.json").toURI().toString());
+    conf.set(HConstants.HYDRABASE_DCNAME, "DUMMYCLUSTER1");
+
+    rmapConf = new RMapConfiguration(conf);
+    URI uri = RMapConfiguration.getRMapSubscription(conf);
+    rmapConf.readRMap(uri);
+    rmapConf.appliedRMap(uri);
+  }
+
+  @Test
+  public void testReadingEmptyRMapSubscription() {
+    conf.set(HConstants.RMAP_SUBSCRIPTION, "");
+    assertNull("RMap subscription should be empty",
+        RMapConfiguration.getRMapSubscription(conf));
+  }
+
+  @Test
+  public void testReadingNonEmptyRMapSubscription()
+      throws URISyntaxException {
+    conf.set(HConstants.RMAP_SUBSCRIPTION,
+        "hbase/rmaps/map1");
+    URI expectedRMapSubscription = new URI("hbase/rmaps/map1");
+    assertEquals(expectedRMapSubscription,
+        RMapConfiguration.getRMapSubscription(conf));
+  }
+
+  @Test
+  public void shouldApplyRMap() {
+    URI subscription = RMapConfiguration.getRMapSubscription(conf);
+    assertTrue(rmapConf.isRMapApplied(subscription));
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapReader.java
new file mode 100644
index 0000000..4e7a798
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/consensus/rmap/TestRMapReader.java
@@ -0,0 +1,102 @@
+package org.apache.hadoop.hbase.consensus.rmap;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.security.NoSuchAlgorithmException;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class TestRMapReader {
+  static String base = "file:/some/rmap.json";
+
+  RMapReader reader;
+  URI current, next, v2, v3;
+
+  @Test
+  public void shouldReturnVersionedURI() throws URISyntaxException {
+    assertEquals(v2, RMapReader.getVersionedURI(base, 2));
+  }
+
+  @Test
+  public void shouldReturnCurrentVersionedURI() throws URISyntaxException {
+    assertEquals(current, RMapReader.getVersionedURI(base, RMapReader.CURRENT));
+  }
+
+  @Test
+  public void shouldReturnNextVersionedURI() throws URISyntaxException {
+    assertEquals(next, RMapReader.getVersionedURI(base, RMapReader.NEXT));
+  }
+
+  @Test
+  public void shouldReturnAbsoluteVersion() throws URISyntaxException {
+    assertEquals(2, RMapReader.getVersion(v2));
+  }
+
+  @Test
+  public void shouldReturnCurrentSymbolicVersion() throws URISyntaxException {
+    assertEquals(RMapReader.CURRENT, RMapReader.getVersion(current));
+  }
+
+  @Test
+  public void shouldReturnNextSymbolicVersion() throws URISyntaxException {
+    assertEquals(RMapReader.NEXT, RMapReader.getVersion(next));
+  }
+
+  @Test
+  public void shouldReturnUnknownSymbolicVersion() throws URISyntaxException {
+    assertEquals(RMapReader.UNKNOWN,
+        RMapReader.getVersion(new URI(base + "?version=FOO")));
+  }
+
+  @Test
+  public void shouldResolveSymbolicVersionAndReturnRMap()
+      throws URISyntaxException, IOException, RMapException {
+    // Stub the abstract methods and forward the call to
+    // RMapReader.readRMap(). This is a bit frowned upon.
+    when(reader.resolveSymbolicVersion(current)).thenReturn(v2);
+    when(reader.readRMapAsString(v2)).thenReturn("{}");
+    when(reader.readRMap(current)).thenCallRealMethod();
+
+    RMapJSON rmap = reader.readRMap(current);
+    assertEquals(v2, rmap.uri);
+    assertEquals("{}", rmap.rmap.toString());
+  }
+
+  @Test
+  public void shouldReturnMD5HashAsHex() throws NoSuchAlgorithmException {
+    assertEquals("99914b932bd37a50b983c5e7c90ae93b",
+        RMapReader.getSignature("{}"));
+  }
+
+  @Test
+  public void shouldReturnCurrentVersion() throws URISyntaxException {
+    when(reader.resolveSymbolicVersion(current)).thenReturn(v2);
+    when(reader.getCurrentVersion(base)).thenCallRealMethod();
+
+    assertEquals(2, reader.getCurrentVersion(base));
+  }
+
+  @Test
+  public void shouldReturnNextVersion() throws URISyntaxException {
+    when(reader.resolveSymbolicVersion(next)).thenReturn(v3);
+    when(reader.getNextVersion(base)).thenCallRealMethod();
+
+    assertEquals(3, reader.getNextVersion(base));
+  }
+
+  @Before
+  public void setUp() throws URISyntaxException, IOException, RMapException {
+    reader = mock(RMapReader.class);
+    // The URIs cannot be created at field initialization time, because
+    // getVersionedURI() throws URISyntaxException.
+    current = RMapReader.getVersionedURI(base, RMapReader.CURRENT);
+    next = RMapReader.getVersionedURI(base, RMapReader.NEXT);
+    v2 = RMapReader.getVersionedURI(base, 2);
+    v3 = RMapReader.getVersionedURI(base, 3);
+  }
+}
-- 
2.1.1