action : actions) {
@@ -989,7 +992,7 @@ class AsyncProcess {
}
}
- /**
+ /**
* Only used w/useGlobalErrors ctor argument, for HTable backward compat.
* @return Whether there were any errors in any request since the last time
* {@link #waitForAllPreviousOpsAndReset(List)} was called, or AP was created.
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index d1a3ece..eb7ef4f 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -27,11 +27,11 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
* keeps a cache of locations and then knows how to re-calibrate after they move. You need one
* of these to talk to your HBase cluster. {@link HConnectionManager} manages instances of this
* class. See it for how to get one of these.
- *
+ *
* This is NOT a connection to a particular server but to ALL servers in the cluster. Individual
* connections are managed at a lower level.
*
@@ -251,7 +251,9 @@ public interface HConnection extends Abortable, Closeable {
* @return HRegionLocation that describes where to find the region in
* question
* @throws IOException if a remote or network exception occurs
+ * @deprecated This is no longer a public API
*/
+ @Deprecated
public HRegionLocation locateRegion(final TableName tableName,
final byte [] row) throws IOException;
@@ -305,11 +307,12 @@ public interface HConnection extends Abortable, Closeable {
* Update the location cache. This is used internally by HBase, in most cases it should not be
* used by the client application.
* @param tableName the table name
+ * @param regionName the region name
* @param rowkey the row
* @param exception the exception if any. Can be null.
* @param source the previous location
*/
- void updateCachedLocations(TableName tableName, byte[] rowkey,
+ void updateCachedLocations(TableName tableName, byte[] regionName, byte[] rowkey,
Object exception, ServerName source);
@Deprecated
@@ -345,7 +348,9 @@ public interface HConnection extends Abortable, Closeable {
* regions from returned list.
* @return list of region locations for all regions of table
* @throws IOException
+ * @deprecated This is no longer a public API
*/
+ @Deprecated
public List<HRegionLocation> locateRegions(final TableName tableName,
final boolean useCache,
final boolean offlined) throws IOException;
@@ -388,6 +393,7 @@ public interface HConnection extends Abortable, Closeable {
* @throws IOException if a remote or network exception occurs
* @deprecated You can pass master flag but nothing special is done.
*/
+ @Deprecated
AdminService.BlockingInterface getAdmin(final ServerName serverName, boolean getMaster)
throws IOException;
@@ -478,6 +484,7 @@ public interface HConnection extends Abortable, Closeable {
* @throws IOException if a remote or network exception occurs
* @deprecated This method will be changed from public to package protected.
*/
+ @Deprecated
int getCurrentNrHRS() throws IOException;
/**
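For callers, the practical change in this interface is the extra regionName argument on updateCachedLocations: with the cache now holding one entry per replica, a row alone no longer identifies a single cached location, so the region name pins down which entry to invalidate. A minimal, hedged sketch of a caller reacting to a failed RPC (the enclosing helper method is hypothetical):

    // Sketch only: invalidate the cached entry for the exact replica that served `row`.
    // `conn`, `tableName`, `row`, `source` and `cause` come from the failed call site.
    void invalidateStaleLocation(HConnection conn, TableName tableName, byte[] row,
        HRegionLocation source, Throwable cause) {
      // Passing the region name drops just the matching replica entry; as the
      // implementation later in this patch shows, a null regionName falls back
      // to clearing whatever is cached for the row and server.
      conn.updateCachedLocations(tableName,
          source.getRegionInfo().getRegionName(), row, cause,
          source.getServerName());
    }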
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
index eed9e34..f720e65 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
@@ -35,9 +35,6 @@ import java.util.NavigableMap;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.ConcurrentSkipListSet;
-import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
@@ -57,6 +54,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
@@ -64,6 +62,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.AsyncProcess.AsyncRequestFuture;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
@@ -77,7 +76,87 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.*;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
@@ -402,6 +481,7 @@ public class HConnectionManager {
* @param conf configuration whose identity is used to find {@link HConnection} instance.
* @deprecated
*/
+ @Deprecated
public static void deleteConnection(Configuration conf) {
deleteConnection(new HConnectionKey(conf), false);
}
@@ -413,6 +493,7 @@ public class HConnectionManager {
* @param connection
* @deprecated
*/
+ @Deprecated
public static void deleteStaleConnection(HConnection connection) {
deleteConnection(connection, true);
}
@@ -423,6 +504,7 @@ public class HConnectionManager {
* staleConnection to true.
* @deprecated
*/
+ @Deprecated
public static void deleteAllConnections(boolean staleConnection) {
synchronized (CONNECTION_INSTANCES) {
Set<HConnectionKey> connectionKeys = new HashSet<HConnectionKey>();
@@ -479,12 +561,14 @@ public class HConnectionManager {
* @return Number of cached regions for the table.
* @throws ZooKeeperConnectionException
*/
+ @VisibleForTesting
static int getCachedRegionCount(Configuration conf, final TableName tableName)
throws IOException {
return execute(new HConnectable<Integer>(conf) {
@Override
public Integer connect(HConnection connection) {
- return ((HConnectionImplementation)connection).getNumberOfCachedRegionLocations(tableName);
+ return ((HConnectionImplementation)connection).metaCache
+ .getNumberOfCachedRegionLocations(tableName);
}
});
}
@@ -495,6 +579,7 @@ public class HConnectionManager {
* @return true if the region where the table and row reside is cached.
* @throws ZooKeeperConnectionException
*/
+ @VisibleForTesting
static boolean isRegionCached(Configuration conf,
final TableName tableName,
final byte[] row)
@@ -502,7 +587,7 @@ public class HConnectionManager {
return execute(new HConnectable<Boolean>(conf) {
@Override
public Boolean connect(HConnection connection) {
- return ((HConnectionImplementation) connection).isRegionCached(tableName, row);
+ return ((HConnectionImplementation) connection).metaCache.isRegionCached(tableName, row);
}
});
}
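Both test helpers above rely on the HConnectionManager.execute/HConnectable pattern: the callback receives a live HConnection for the given Configuration, and the manager takes care of acquiring and releasing it. Restated as a hedged sketch with the generics spelled out (the helper name is hypothetical; metaCache is package-private, so this assumes the same package):

    // Sketch of the HConnectable callback pattern used by the helpers above.
    static boolean regionCachedForRow(Configuration conf, final TableName tableName,
        final byte[] row) throws IOException {
      return HConnectionManager.execute(new HConnectable<Boolean>(conf) {
        @Override
        public Boolean connect(HConnection connection) throws IOException {
          return ((HConnectionImplementation) connection).metaCache
              .isRegionCached(tableName, row);
        }
      });
    }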
@@ -584,24 +669,7 @@ public class HConnectionManager {
// Client rpc instance.
private RpcClient rpcClient;
- /**
- * Map of table to table {@link HRegionLocation}s.
- */
- private final ConcurrentMap<TableName, ConcurrentSkipListMap<byte[], HRegionLocation>>
- cachedRegionLocations =
- new ConcurrentHashMap<TableName, ConcurrentSkipListMap<byte[], HRegionLocation>>();
-
- // The presence of a server in the map implies it's likely that there is an
- // entry in cachedRegionLocations that map to this server; but the absence
- // of a server in this map guarentees that there is no entry in cache that
- // maps to the absent server.
- // The access to this attribute must be protected by a lock on cachedRegionLocations
- private final Set<ServerName> cachedServers = new ConcurrentSkipListSet<ServerName>();
-
- // region cache prefetch is enabled by default. this set contains all
- // tables whose region cache prefetch are disabled.
- private final Set<Integer> regionCachePrefetchDisabledTables =
- new CopyOnWriteArraySet<Integer>();
+ private MetaCache metaCache = new MetaCache();
private int refCount;
@@ -813,6 +881,7 @@ public class HConnectionManager {
* An identifier that will remain the same for a given connection.
* @return
*/
+ @Override
public String toString(){
return "hconnection-0x" + Integer.toHexString(hashCode());
}
@@ -984,8 +1053,9 @@ public class HConnectionManager {
@Override
public HRegionLocation locateRegion(final byte[] regionName) throws IOException {
- return locateRegion(HRegionInfo.getTable(regionName),
+ RegionLocations locations = locateRegion(HRegionInfo.getTable(regionName),
HRegionInfo.getStartKey(regionName), false, true);
+ return locations == null ? null : locations.getRegionLocation();
}
@Override
@@ -1016,7 +1086,15 @@ public class HConnectionManager {
tableName, offlined);
final List<HRegionLocation> locations = new ArrayList<HRegionLocation>();
for (HRegionInfo regionInfo : regions.keySet()) {
- locations.add(locateRegion(tableName, regionInfo.getStartKey(), useCache, true));
+ RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
+ if (list != null) {
+ for (HRegionLocation loc : list.getRegionLocations()) {
+ if (loc != null) {
+ locations.add(loc);
+ }
+ }
+ }
+
}
return locations;
}
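A RegionLocations groups the locations of one region together with its replicas, and a slot can be null when a replica's location is unknown; that is why the loop above filters nulls before flattening into the returned list. The same null-tolerant traversal applies anywhere a RegionLocations is consumed. A hedged sketch:

    // Sketch: count the replica slots that currently have a known location.
    static int knownReplicaLocations(RegionLocations locations) {
      if (locations == null) return 0;
      int known = 0;
      for (HRegionLocation loc : locations.getRegionLocations()) {
        if (loc != null) known++; // null slot: no location cached for that replica
      }
      return known;
    }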
@@ -1031,7 +1109,8 @@ public class HConnectionManager {
public HRegionLocation locateRegion(final TableName tableName,
final byte [] row)
throws IOException{
- return locateRegion(tableName, row, true, true);
+ RegionLocations locations = locateRegion(tableName, row, true, true);
+ return locations == null ? null : locations.getRegionLocation();
}
@Override
@@ -1051,7 +1130,8 @@ public class HConnectionManager {
throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
}
- return locateRegion(tableName, row, false, true);
+ RegionLocations locations = locateRegion(tableName, row, false, true);
+ return locations == null ? null : locations.getRegionLocation();
}
@Override
@@ -1061,7 +1141,7 @@ public class HConnectionManager {
}
- private HRegionLocation locateRegion(final TableName tableName,
+ private RegionLocations locateRegion(final TableName tableName,
final byte [] row, boolean useCache, boolean retry)
throws IOException {
if (this.closed) throw new IOException(toString() + " closed");
@@ -1089,12 +1169,14 @@ public class HConnectionManager {
// Implement a new visitor for MetaScanner, and use it to walk through
// the hbase:meta
MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
+ @Override
public boolean processRow(Result result) throws IOException {
try {
- HRegionInfo regionInfo = MetaScanner.getHRegionInfo(result);
- if (regionInfo == null) {
+ RegionLocations locations = MetaReader.getRegionLocations(result);
+ if (locations == null) {
return true;
}
+ HRegionInfo regionInfo = locations.getRegionLocation().getRegionInfo();
// possible we got a region of a different table...
if (!regionInfo.getTable().equals(tableName)) {
@@ -1105,15 +1187,13 @@ public class HConnectionManager {
return true;
}
- ServerName serverName = HRegionInfo.getServerName(result);
+ ServerName serverName = locations.getRegionLocation().getServerName();
if (serverName == null) {
return true; // don't cache it
}
- // instantiate the location
- long seqNum = HRegionInfo.getSeqNumDuringOpen(result);
- HRegionLocation loc = new HRegionLocation(regionInfo, serverName, seqNum);
+
// cache this meta entry
- cacheLocation(tableName, null, loc);
+ cacheLocation(tableName, locations);
return true;
} catch (RuntimeException e) {
throw new IOException(e);
@@ -1137,11 +1217,11 @@ public class HConnectionManager {
* Search the hbase:meta table for the HRegionLocation
* info that contains the table and row we're seeking.
*/
- private HRegionLocation locateRegionInMeta(final TableName parentTable,
+ private RegionLocations locateRegionInMeta(final TableName parentTable,
final TableName tableName, final byte [] row, boolean useCache,
Object regionLockObject, boolean retry)
throws IOException {
- HRegionLocation location;
+ RegionLocations location;
// If we are supposed to be using the cache, look in the cache to see if
// we already have the region.
if (useCache) {
@@ -1165,7 +1245,8 @@ public class HConnectionManager {
HRegionLocation metaLocation = null;
try {
// locate the meta region
- metaLocation = locateRegion(parentTable, metaKey, true, false);
+ RegionLocations metaLocations = locateRegion(parentTable, metaKey, true, false);
+ metaLocation = metaLocations == null ? null : metaLocations.getRegionLocation();
// If null still, go around again.
if (metaLocation == null) continue;
ClientService.BlockingInterface service = getClient(metaLocation.getServerName());
@@ -1196,7 +1277,7 @@ public class HConnectionManager {
} else {
// If we are not supposed to be using the cache, delete any existing cached location
// so it won't interfere.
- forceDeleteCachedLocation(tableName, row);
+ metaCache.clearCache(tableName, row);
}
// Query the meta region for the location of the meta region
@@ -1209,7 +1290,8 @@ public class HConnectionManager {
}
// convert the row result into the HRegionLocation we need!
- HRegionInfo regionInfo = MetaScanner.getHRegionInfo(regionInfoRow);
+ location = MetaReader.getRegionLocations(regionInfoRow);
+ HRegionInfo regionInfo = location.getRegionLocation().getRegionInfo();
if (regionInfo == null) {
throw new IOException("HRegionInfo was null or empty in " +
parentTable + ", row=" + regionInfoRow);
@@ -1233,7 +1315,7 @@ public class HConnectionManager {
regionInfo.getRegionNameAsString());
}
- ServerName serverName = HRegionInfo.getServerName(regionInfoRow);
+ ServerName serverName = location.getRegionLocation().getServerName();
if (serverName == null) {
throw new NoServerForRegionException("No server address listed " +
"in " + parentTable + " for region " +
@@ -1247,10 +1329,7 @@ public class HConnectionManager {
", but it is dead.");
}
- // Instantiate the location
- location = new HRegionLocation(regionInfo, serverName,
- HRegionInfo.getSeqNumDuringOpen(regionInfoRow));
- cacheLocation(tableName, null, location);
+ cacheLocation(tableName, location);
return location;
} catch (TableNotFoundException e) {
// if we got this error, probably means the table just plain doesn't
@@ -1290,7 +1369,16 @@ public class HConnectionManager {
}
}
- /*
+ /**
+ * Put a newly discovered HRegionLocation into the cache.
+ * @param tableName The table name.
+ * @param location the new location
+ */
+ private void cacheLocation(final TableName tableName, final RegionLocations location) {
+ metaCache.cacheLocation(tableName, location);
+ }
+
+ /**
* Search the cache for a location that fits our table and row key.
* Return null if no suitable region is located.
*
@@ -1298,31 +1386,9 @@ public class HConnectionManager {
* @param row
* @return Null or region location found in cache.
*/
- HRegionLocation getCachedLocation(final TableName tableName,
+ RegionLocations getCachedLocation(final TableName tableName,
final byte [] row) {
- ConcurrentSkipListMap<byte[], HRegionLocation> tableLocations =
- getTableLocations(tableName);
-
- Entry<byte[], HRegionLocation> e = tableLocations.floorEntry(row);
- if (e == null) {
- return null;
- }
- HRegionLocation possibleRegion = e.getValue();
-
- // make sure that the end key is greater than the row we're looking
- // for, otherwise the row actually belongs in the next region, not
- // this one. the exception case is when the endkey is
- // HConstants.EMPTY_END_ROW, signifying that the region we're
- // checking is actually the last region in the table.
- byte[] endKey = possibleRegion.getRegionInfo().getEndKey();
- if (Bytes.equals(endKey, HConstants.EMPTY_END_ROW) ||
- tableName.getRowComparator().compareRows(
- endKey, 0, endKey.length, row, 0, row.length) > 0) {
- return possibleRegion;
- }
-
- // Passed all the way through, so we got nothing - complete cache miss
- return null;
+ return metaCache.getCachedLocation(tableName, row);
}
/**
@@ -1330,20 +1396,8 @@ public class HConnectionManager {
* @param tableName tableName
* @param row
*/
- void forceDeleteCachedLocation(final TableName tableName, final byte [] row) {
- HRegionLocation rl = null;
- Map<byte[], HRegionLocation> tableLocations = getTableLocations(tableName);
- // start to examine the cache. we can only do cache actions
- // if there's something in the cache for this table.
- rl = getCachedLocation(tableName, row);
- if (rl != null) {
- tableLocations.remove(rl.getRegionInfo().getStartKey());
- }
- if ((rl != null) && LOG.isDebugEnabled()) {
- LOG.debug("Removed " + rl.getHostname() + ":" + rl.getPort()
- + " as a location of " + rl.getRegionInfo().getRegionNameAsString() +
- " for tableName=" + tableName + " from cache");
- }
+ public void clearRegionCache(final TableName tableName, byte[] row) {
+ metaCache.clearCache(tableName, row);
}
/*
@@ -1351,66 +1405,17 @@ public class HConnectionManager {
*/
@Override
public void clearCaches(final ServerName serverName) {
- if (!this.cachedServers.contains(serverName)) {
- return;
- }
-
- boolean deletedSomething = false;
- synchronized (this.cachedServers) {
- // We block here, because if there is an error on a server, it's likely that multiple
- // threads will get the error simultaneously. If there are hundreds of thousand of
- // region location to check, it's better to do this only once. A better pattern would
- // be to check if the server is dead when we get the region location.
- if (!this.cachedServers.contains(serverName)) {
- return;
- }
- for (Map<byte[], HRegionLocation> tableLocations : cachedRegionLocations.values()) {
- for (Entry<byte[], HRegionLocation> e : tableLocations.entrySet()) {
- HRegionLocation value = e.getValue();
- if (value != null
- && serverName.equals(value.getServerName())) {
- tableLocations.remove(e.getKey());
- deletedSomething = true;
- }
- }
- }
- this.cachedServers.remove(serverName);
- }
- if (deletedSomething && LOG.isDebugEnabled()) {
- LOG.debug("Removed all cached region locations that map to " + serverName);
- }
- }
-
- /*
- * @param tableName
- * @return Map of cached locations for passed tableName
- */
- private ConcurrentSkipListMap<byte[], HRegionLocation> getTableLocations(
- final TableName tableName) {
- // find the map of cached locations for this table
- ConcurrentSkipListMap<byte[], HRegionLocation> result;
- result = this.cachedRegionLocations.get(tableName);
- // if tableLocations for this table isn't built yet, make one
- if (result == null) {
- result = new ConcurrentSkipListMap<byte[], HRegionLocation>(Bytes.BYTES_COMPARATOR);
- ConcurrentSkipListMap<byte[], HRegionLocation> old =
- this.cachedRegionLocations.putIfAbsent(tableName, result);
- if (old != null) {
- return old;
- }
- }
- return result;
+ metaCache.clearCache(serverName);
}
@Override
public void clearRegionCache() {
- this.cachedRegionLocations.clear();
- this.cachedServers.clear();
+ metaCache.clearCache();
}
@Override
public void clearRegionCache(final TableName tableName) {
- this.cachedRegionLocations.remove(tableName);
+ metaCache.clearCache(tableName);
}
@Override
@@ -1426,37 +1431,7 @@ public class HConnectionManager {
*/
private void cacheLocation(final TableName tableName, final ServerName source,
final HRegionLocation location) {
- boolean isFromMeta = (source == null);
- byte [] startKey = location.getRegionInfo().getStartKey();
- ConcurrentMap<byte[], HRegionLocation> tableLocations = getTableLocations(tableName);
- HRegionLocation oldLocation = tableLocations.putIfAbsent(startKey, location);
- boolean isNewCacheEntry = (oldLocation == null);
- if (isNewCacheEntry) {
- cachedServers.add(location.getServerName());
- return;
- }
- boolean updateCache;
- // If the server in cache sends us a redirect, assume it's always valid.
- if (oldLocation.getServerName().equals(source)) {
- updateCache = true;
- } else {
- long newLocationSeqNum = location.getSeqNum();
- // Meta record is stale - some (probably the same) server has closed the region
- // with later seqNum and told us about the new location.
- boolean isStaleMetaRecord = isFromMeta && (oldLocation.getSeqNum() > newLocationSeqNum);
- // Same as above for redirect. However, in this case, if the number is equal to previous
- // record, the most common case is that first the region was closed with seqNum, and then
- // opened with the same seqNum; hence we will ignore the redirect.
- // There are so many corner cases with various combinations of opens and closes that
- // an additional counter on top of seqNum would be necessary to handle them all.
- boolean isStaleRedirect = !isFromMeta && (oldLocation.getSeqNum() >= newLocationSeqNum);
- boolean isStaleUpdate = (isStaleMetaRecord || isStaleRedirect);
- updateCache = (!isStaleUpdate);
- }
- if (updateCache) {
- tableLocations.replace(startKey, oldLocation, location);
- cachedServers.add(location.getServerName());
- }
+ metaCache.cacheLocation(tableName, source, location);
}
// Map keyed by service name + regionserver to service stub implementation
@@ -2120,7 +2095,7 @@ public class HConnectionManager {
}
};
}
-
+
private static void release(MasterServiceState mss) {
if (mss != null && mss.connection != null) {
@@ -2179,37 +2154,17 @@ public class HConnectionManager {
cacheLocation(hri.getTable(), source, newHrl);
}
- /**
- * Deletes the cached location of the region if necessary, based on some error from source.
- * @param hri The region in question.
- * @param source The source of the error that prompts us to invalidate cache.
- */
- void deleteCachedLocation(HRegionInfo hri, ServerName source) {
- getTableLocations(hri.getTable()).remove(hri.getStartKey());
- }
-
@Override
public void deleteCachedRegionLocation(final HRegionLocation location) {
- if (location == null || location.getRegionInfo() == null) {
- return;
- }
-
- HRegionLocation removedLocation;
- TableName tableName = location.getRegionInfo().getTable();
- Map<byte[], HRegionLocation> tableLocations = getTableLocations(tableName);
- removedLocation = tableLocations.remove(location.getRegionInfo().getStartKey());
- if (LOG.isDebugEnabled() && removedLocation != null) {
- LOG.debug("Removed " +
- location.getRegionInfo().getRegionNameAsString() +
- " for tableName=" + tableName +
- " from cache");
- }
+ metaCache.clearCache(location);
}
@Override
public void updateCachedLocations(final TableName tableName, byte[] rowkey,
final Object exception, final HRegionLocation source) {
- updateCachedLocations(tableName, rowkey, exception, source.getServerName());
+ assert source != null;
+ updateCachedLocations(tableName, source.getRegionInfo().getRegionName(),
+ rowkey, exception, source.getServerName());
}
/**
@@ -2221,7 +2176,7 @@ public class HConnectionManager {
* @param source server that is the source of the location update.
*/
@Override
- public void updateCachedLocations(final TableName tableName, byte[] rowkey,
+ public void updateCachedLocations(final TableName tableName, byte[] regionName, byte[] rowkey,
final Object exception, final ServerName source) {
if (rowkey == null || tableName == null) {
LOG.warn("Coding error, see method javadoc. row=" + (rowkey == null ? "null" : rowkey) +
@@ -2234,8 +2189,18 @@ public class HConnectionManager {
return;
}
+ if (regionName == null) {
+ // we do not know which region, so just remove the cache entry for the row and server
+ metaCache.clearCache(tableName, rowkey, source);
+ return;
+ }
+
// Is it something we have already updated?
- final HRegionLocation oldLocation = getCachedLocation(tableName, rowkey);
+ final RegionLocations oldLocations = getCachedLocation(tableName, rowkey);
+ HRegionLocation oldLocation = null;
+ if (oldLocations != null) {
+ oldLocation = oldLocations.getRegionLocationByRegionName(regionName);
+ }
if (oldLocation == null || !source.equals(oldLocation.getServerName())) {
// There is no such location in the cache (it's been removed already) or
// the cache has already been refreshed with a different location. => nothing to do
@@ -2266,8 +2231,8 @@ public class HConnectionManager {
}
// If we're here, it means that we cannot be sure about the location, so we remove it from
- // the cache.
- deleteCachedLocation(regionInfo, source);
+ // the cache. Do not send the source because source can be a new server in the same host:port
+ metaCache.clearCache(regionInfo);
}
@Override
@@ -2354,35 +2319,15 @@ public class HConnectionManager {
* Return the number of cached region for a table. It will only be called
* from a unit test.
*/
+ @VisibleForTesting
int getNumberOfCachedRegionLocations(final TableName tableName) {
- Map<byte[], HRegionLocation> tableLocs = this.cachedRegionLocations.get(tableName);
- if (tableLocs == null) {
- return 0;
- }
- return tableLocs.values().size();
- }
-
- /**
- * Check the region cache to see whether a region is cached yet or not.
- * Called by unit tests.
- * @param tableName tableName
- * @param row row
- * @return Region cached or not.
- */
- boolean isRegionCached(TableName tableName, final byte[] row) {
- HRegionLocation location = getCachedLocation(tableName, row);
- return location != null;
+ return metaCache.getNumberOfCachedRegionLocations(tableName);
}
@Override
public void setRegionCachePrefetch(final TableName tableName,
final boolean enable) {
- if (!enable) {
- regionCachePrefetchDisabledTables.add(Bytes.mapKey(tableName.getName()));
- }
- else {
- regionCachePrefetchDisabledTables.remove(Bytes.mapKey(tableName.getName()));
- }
+ metaCache.setRegionCachePrefetch(tableName, enable);
}
@Override
@@ -2393,7 +2338,7 @@ public class HConnectionManager {
@Override
public boolean getRegionCachePrefetch(TableName tableName) {
- return !regionCachePrefetchDisabledTables.contains(Bytes.mapKey(tableName.getName()));
+ return metaCache.getRegionCachePrefetch(tableName);
}
@Override
@@ -2701,7 +2646,7 @@ public class HConnectionManager {
* Look for an exception we know in the remote exception:
* - hadoop.ipc wrapped exceptions
* - nested exceptions
- *
+ *
* Looks for: RegionMovedException / RegionOpeningException / RegionTooBusyException
* @return null if we didn't find the exception, the exception otherwise.
*/
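The net effect on HConnectionImplementation is that it no longer owns any location state: the per-table location map, the cachedServers set, and the prefetch-disabled set are gone, and every read, write, and invalidation funnels through the single metaCache field. Reduced to a hedged sketch, the facade that remains looks roughly like this:

    // Sketch of the delegation left behind in HConnectionImplementation.
    class CacheFacade {
      private final MetaCache metaCache = new MetaCache();

      RegionLocations lookup(TableName table, byte[] row) {
        return metaCache.getCachedLocation(table, row);
      }
      void rememberFromMeta(TableName table, RegionLocations locs) {
        metaCache.cacheLocation(table, locs); // merges with cached replica entries
      }
      void forgetServer(ServerName serverName) {
        metaCache.clearCache(serverName); // drop every entry served by serverName
      }
    }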
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index fa25dde..5820e08 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncProcess.AsyncRequestFuture;
@@ -69,6 +70,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
+import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;
@@ -585,12 +587,15 @@ public class HTable implements HTableInterface {
* @return Pair of arrays of region starting and ending row keys
* @throws IOException if a remote or network exception occurs
*/
+ // TODO: these are not in HTableInterface. Should we add them there or move these to HBaseAdmin?
public Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
- NavigableMap<HRegionInfo, ServerName> regions = getRegionLocations();
+
+ List<RegionLocations> regions = listRegionLocations();
final List<byte[]> startKeyList = new ArrayList<byte[]>(regions.size());
final List<byte[]> endKeyList = new ArrayList<byte[]>(regions.size());
- for (HRegionInfo region : regions.keySet()) {
+ for (RegionLocations locations : regions) {
+ HRegionInfo region = locations.getRegionLocation().getRegionInfo();
startKeyList.add(region.getStartKey());
endKeyList.add(region.getEndKey());
}
@@ -600,13 +605,20 @@ public class HTable implements HTableInterface {
endKeyList.toArray(new byte[endKeyList.size()][]));
}
+ @VisibleForTesting
+ List<RegionLocations> listRegionLocations() throws IOException {
+ return MetaScanner.listTableRegionLocations(getConfiguration(), this.connection, getName());
+ }
+
/**
* Gets all the regions and their address for this table.
*
* This is mainly useful for the MapReduce integration.
* @return A map of HRegionInfo with its server address
* @throws IOException if a remote or network exception occurs
+ * @deprecated This is no longer a public API
*/
+ @Deprecated
public NavigableMap<HRegionInfo, ServerName> getRegionLocations() throws IOException {
// TODO: Odd that this returns a Map of HRI to SN whereas getRegionLocation, singular, returns an HRegionLocation.
return MetaScanner.allTableRegions(getConfiguration(), this.connection, getName(), false);
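getStartEndKeys remains the usual way to split work along region boundaries, for example to build one scan per region. A hedged usage sketch (the table name and conf are illustrative):

    // Sketch: derive per-region [start, end) ranges for parallel scanning.
    HTable table = new HTable(conf, TableName.valueOf("usertable"));
    Pair<byte[][], byte[][]> keys = table.getStartEndKeys();
    for (int i = 0; i < keys.getFirst().length; i++) {
      byte[] startKey = keys.getFirst()[i];
      byte[] endKey = keys.getSecond()[i]; // empty for the last region
      // configure one Scan over [startKey, endKey)
    }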
@@ -620,7 +632,9 @@ public class HTable implements HTableInterface {
* @return A list of HRegionLocations corresponding to the regions that
* contain the specified range
* @throws IOException if a remote or network exception occurs
+ * @deprecated This is no longer a public API
*/
+ @Deprecated
public List<HRegionLocation> getRegionsInRange(final byte [] startKey,
final byte [] endKey) throws IOException {
return getRegionsInRange(startKey, endKey, false);
@@ -635,7 +649,9 @@ public class HTable implements HTableInterface {
* @return A list of HRegionLocations corresponding to the regions that
* contain the specified range
* @throws IOException if a remote or network exception occurs
+ * @deprecated This is no longer a public API
*/
+ @Deprecated
public List<HRegionLocation> getRegionsInRange(final byte [] startKey,
final byte [] endKey, final boolean reload) throws IOException {
return getKeysAndRegionsInRange(startKey, endKey, false, reload).getSecond();
@@ -651,7 +667,9 @@ public class HTable implements HTableInterface {
* @return A pair of list of start keys and list of HRegionLocations that
* contain the specified range
* @throws IOException if a remote or network exception occurs
+ * @deprecated This is no longer a public API
*/
+ @Deprecated
private Pair<List<byte[]>, List<HRegionLocation>> getKeysAndRegionsInRange(
final byte[] startKey, final byte[] endKey, final boolean includeEndKey)
throws IOException {
@@ -669,7 +687,9 @@ public class HTable implements HTableInterface {
* @return A pair of list of start keys and list of HRegionLocations that
* contain the specified range
* @throws IOException if a remote or network exception occurs
+ * @deprecated This is no longer a public API
*/
+ @Deprecated
private Pair<List<byte[]>, List<HRegionLocation>> getKeysAndRegionsInRange(
final byte[] startKey, final byte[] endKey, final boolean includeEndKey,
final boolean reload) throws IOException {
@@ -702,7 +722,8 @@ public class HTable implements HTableInterface {
throws IOException {
RegionServerCallable<Result> callable = new RegionServerCallable<Result>(this.connection,
tableName, row) {
- public Result call() throws IOException {
+ @Override
+ public Result call() throws IOException {
return ProtobufUtil.getRowOrBefore(getStub(),
getLocation().getRegionInfo().getRegionName(), row, family);
}
@@ -757,6 +778,7 @@ public class HTable implements HTableInterface {
public Result get(final Get get) throws IOException {
RegionServerCallable<Result> callable = new RegionServerCallable<Result>(this.connection,
getName(), get.getRow()) {
+ @Override
public Result call() throws IOException {
return ProtobufUtil.get(getStub(), getLocation().getRegionInfo().getRegionName(), get);
}
@@ -807,6 +829,7 @@ public class HTable implements HTableInterface {
* @deprecated If any exception is thrown by one of the actions, there is no way to
* retrieve the partially executed results. Use {@link #batch(List, Object[])} instead.
*/
+ @Deprecated
@Override
public Object[] batch(final List<? extends Row> actions)
throws InterruptedException, IOException {
@@ -832,6 +855,7 @@ public class HTable implements HTableInterface {
* {@link #batchCallback(List, Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
* instead.
*/
+ @Deprecated
@Override
public <R> Object[] batchCallback(
final List<? extends Row> actions, final Batch.Callback<R> callback) throws IOException,
@@ -849,6 +873,7 @@ public class HTable implements HTableInterface {
throws IOException {
RegionServerCallable<Boolean> callable = new RegionServerCallable<Boolean>(connection,
tableName, delete.getRow()) {
+ @Override
public Boolean call() throws IOException {
try {
MutateRequest request = RequestConverter.buildMutateRequest(
@@ -985,6 +1010,7 @@ public class HTable implements HTableInterface {
public void mutateRow(final RowMutations rm) throws IOException {
RegionServerCallable<Void> callable =
new RegionServerCallable<Void>(connection, getName(), rm.getRow()) {
+ @Override
public Void call() throws IOException {
try {
RegionAction.Builder regionMutationBuilder = RequestConverter.buildRegionAction(
@@ -1018,6 +1044,7 @@ public class HTable implements HTableInterface {
final long nonceGroup = ng.getNonceGroup(), nonce = ng.newNonce();
RegionServerCallable<Result> callable =
new RegionServerCallable<Result>(this.connection, getName(), append.getRow()) {
+ @Override
public Result call() throws IOException {
try {
MutateRequest request = RequestConverter.buildMutateRequest(
@@ -1048,6 +1075,7 @@ public class HTable implements HTableInterface {
final long nonceGroup = ng.getNonceGroup(), nonce = ng.newNonce();
RegionServerCallable<Result> callable = new RegionServerCallable<Result>(this.connection,
getName(), increment.getRow()) {
+ @Override
public Result call() throws IOException {
try {
MutateRequest request = RequestConverter.buildMutateRequest(
@@ -1110,6 +1138,7 @@ public class HTable implements HTableInterface {
final long nonceGroup = ng.getNonceGroup(), nonce = ng.newNonce();
RegionServerCallable<Long> callable =
new RegionServerCallable<Long>(connection, getName(), row) {
+ @Override
public Long call() throws IOException {
try {
MutateRequest request = RequestConverter.buildIncrementRequest(
@@ -1139,6 +1168,7 @@ public class HTable implements HTableInterface {
throws IOException {
RegionServerCallable<Boolean> callable =
new RegionServerCallable<Boolean>(connection, getName(), row) {
+ @Override
public Boolean call() throws IOException {
try {
MutateRequest request = RequestConverter.buildMutateRequest(
@@ -1164,6 +1194,7 @@ public class HTable implements HTableInterface {
throws IOException {
RegionServerCallable<Boolean> callable =
new RegionServerCallable<Boolean>(connection, getName(), row) {
+ @Override
public Boolean call() throws IOException {
try {
CompareType compareType = CompareType.valueOf(compareOp.name());
@@ -1190,6 +1221,7 @@ public class HTable implements HTableInterface {
throws IOException {
RegionServerCallable<Boolean> callable =
new RegionServerCallable<Boolean>(connection, getName(), row) {
+ @Override
public Boolean call() throws IOException {
try {
MutateRequest request = RequestConverter.buildMutateRequest(
@@ -1215,6 +1247,7 @@ public class HTable implements HTableInterface {
throws IOException {
RegionServerCallable<Boolean> callable =
new RegionServerCallable<Boolean>(connection, getName(), row) {
+ @Override
public Boolean call() throws IOException {
try {
CompareType compareType = CompareType.valueOf(compareOp.name());
@@ -1399,6 +1432,7 @@ public class HTable implements HTableInterface {
* @param writeBufferSize The new write buffer size, in bytes.
* @throws IOException if a remote or network exception occurs.
*/
+ @Override
public void setWriteBufferSize(long writeBufferSize) throws IOException {
this.writeBufferSize = writeBufferSize;
if(currentWriteBufferSize > writeBufferSize) {
@@ -1524,6 +1558,7 @@ public class HTable implements HTableInterface {
/**
* {@inheritDoc}
*/
+ @Override
public CoprocessorRpcChannel coprocessorService(byte[] row) {
return new RegionCoprocessorRpcChannel(connection, tableName, row);
}
@@ -1538,6 +1573,7 @@ public class HTable implements HTableInterface {
final Map<byte[], R> results = Collections.synchronizedMap(
new TreeMap<byte[], R>(Bytes.BYTES_COMPARATOR));
coprocessorService(service, startKey, endKey, callable, new Batch.Callback<R>() {
+ @Override
public void update(byte[] region, byte[] row, R value) {
if (region != null) {
results.put(region, value);
@@ -1565,6 +1601,7 @@ public class HTable implements HTableInterface {
new RegionCoprocessorRpcChannel(connection, tableName, r);
Future<R> future = pool.submit(
new Callable<R>() {
+ @Override
public R call() throws Exception {
T instance = ProtobufUtil.newServiceStub(service, channel);
R result = callable.call(instance);
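Most of the churn in this file is mechanical (restored @Override and @Deprecated annotations), but the batch deprecations carry a real message: prefer the two-argument batch that fills a caller-supplied results array, because it preserves per-action outcomes when an exception aborts the run. A hedged sketch (row, family and qualifier values are illustrative):

    // Sketch: batch(List, Object[]) keeps partial results on failure.
    void runBatch(HTable table) throws IOException, InterruptedException {
      List<Row> actions = new ArrayList<Row>();
      actions.add(new Get(Bytes.toBytes("row1")));
      Put put = new Put(Bytes.toBytes("row2"));
      put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      actions.add(put);

      Object[] results = new Object[actions.size()];
      // Even if this throws, results[i] holds the outcome (or exception) of action i.
      table.batch(actions, results);
    }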
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java
new file mode 100644
index 0000000..b76e4c6
--- /dev/null
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java
@@ -0,0 +1,363 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.ConcurrentSkipListSet;
+import java.util.concurrent.CopyOnWriteArraySet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * A cache implementation for region locations from meta.
+ */
+@InterfaceAudience.Private
+public class MetaCache {
+
+ private static final Log LOG = LogFactory.getLog(MetaCache.class);
+
+ /**
+ * Map of table to table {@link HRegionLocation}s.
+ */
+ private final ConcurrentMap<TableName, ConcurrentSkipListMap<byte[], RegionLocations>>
+ cachedRegionLocations =
+ new ConcurrentHashMap<TableName, ConcurrentSkipListMap<byte[], RegionLocations>>();
+
+ // The presence of a server in the map implies it's likely that there is an
+ // entry in cachedRegionLocations that map to this server; but the absence
+ // of a server in this map guarantees that there is no entry in cache that
+ // maps to the absent server.
+ // The access to this attribute must be protected by a lock on cachedRegionLocations
+ private final Set<ServerName> cachedServers = new ConcurrentSkipListSet<ServerName>();
+
+ // region cache prefetch is enabled by default. this set contains all
+ // tables whose region cache prefetch are disabled.
+ private final Set<Integer> regionCachePrefetchDisabledTables =
+ new CopyOnWriteArraySet<Integer>();
+
+ /**
+ * Search the cache for a location that fits our table and row key.
+ * Return null if no suitable region is located.
+ *
+ * @param tableName
+ * @param row
+ * @return Null or region location found in cache.
+ */
+ public RegionLocations getCachedLocation(final TableName tableName, final byte [] row) {
+ ConcurrentSkipListMap<byte[], RegionLocations> tableLocations =
+ getTableLocations(tableName);
+
+ Entry<byte[], RegionLocations> e = tableLocations.floorEntry(row);
+ if (e == null) {
+ return null;
+ }
+ RegionLocations possibleRegion = e.getValue();
+
+ // make sure that the end key is greater than the row we're looking
+ // for, otherwise the row actually belongs in the next region, not
+ // this one. the exception case is when the endkey is
+ // HConstants.EMPTY_END_ROW, signifying that the region we're
+ // checking is actually the last region in the table.
+ byte[] endKey = possibleRegion.getRegionLocation().getRegionInfo().getEndKey();
+ if (Bytes.equals(endKey, HConstants.EMPTY_END_ROW) ||
+ tableName.getRowComparator().compareRows(
+ endKey, 0, endKey.length, row, 0, row.length) > 0) {
+ return possibleRegion;
+ }
+
+ // Passed all the way through, so we got nothing - complete cache miss
+ return null;
+ }
+
+ /**
+ * Put a newly discovered HRegionLocation into the cache.
+ * @param tableName The table name.
+ * @param source the source of the new location
+ * @param location the new location
+ */
+ public void cacheLocation(final TableName tableName, final ServerName source,
+ final HRegionLocation location) {
+ assert source != null;
+ byte [] startKey = location.getRegionInfo().getStartKey();
+ ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
+ RegionLocations locations = new RegionLocations(new HRegionLocation[] {location});
+ RegionLocations oldLocations = tableLocations.putIfAbsent(startKey, locations);
+ boolean isNewCacheEntry = (oldLocations == null);
+ if (isNewCacheEntry) {
+ addToCachedServers(locations);
+ return;
+ }
+
+ // If the server in cache sends us a redirect, assume it's always valid.
+ HRegionLocation oldLocation = oldLocations.getRegionLocation(
+ location.getRegionInfo().getReplicaId());
+ boolean force = oldLocation != null && oldLocation.getServerName() != null
+ && oldLocation.getServerName().equals(source);
+
+ // For redirect if the number is equal to previous
+ // record, the most common case is that first the region was closed with seqNum, and then
+ // opened with the same seqNum; hence we will ignore the redirect.
+ // There are so many corner cases with various combinations of opens and closes that
+ // an additional counter on top of seqNum would be necessary to handle them all.
+ RegionLocations updatedLocations = oldLocations.updateLocation(location, false, force);
+ if (oldLocations != updatedLocations) {
+ tableLocations.replace(startKey, oldLocations, updatedLocations);
+ addToCachedServers(updatedLocations);
+ }
+ }
+
+ /**
+ * Put a newly discovered HRegionLocation into the cache.
+ * @param tableName The table name.
+ * @param location the new location
+ */
+ public void cacheLocation(final TableName tableName, final RegionLocations location) {
+ byte [] startKey = location.getRegionLocation().getRegionInfo().getStartKey();
+ ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
+ RegionLocations oldLocation = tableLocations.putIfAbsent(startKey, location);
+ boolean isNewCacheEntry = (oldLocation == null);
+ if (isNewCacheEntry) {
+ addToCachedServers(location);
+ return;
+ }
+
+ // merge old and new locations and add it to the cache
+ // Meta record might be stale - some (probably the same) server has closed the region
+ // with later seqNum and told us about the new location.
+ RegionLocations mergedLocation = oldLocation.mergeLocations(location);
+ tableLocations.replace(startKey, oldLocation, mergedLocation);
+ addToCachedServers(location);
+ }
+
+ private void addToCachedServers(RegionLocations locations) {
+ for (HRegionLocation loc : locations.getRegionLocations()) {
+ if (loc != null) {
+ cachedServers.add(loc.getServerName());
+ }
+ }
+ }
+
+ /**
+ * @param tableName
+ * @return Map of cached locations for passed tableName
+ */
+ private ConcurrentSkipListMap<byte[], RegionLocations>
+ getTableLocations(final TableName tableName) {
+ // find the map of cached locations for this table
+ ConcurrentSkipListMap<byte[], RegionLocations> result;
+ result = this.cachedRegionLocations.get(tableName);
+ // if tableLocations for this table isn't built yet, make one
+ if (result == null) {
+ result = new ConcurrentSkipListMap<byte[], RegionLocations>(Bytes.BYTES_COMPARATOR);
+ ConcurrentSkipListMap<byte[], RegionLocations> old =
+ this.cachedRegionLocations.putIfAbsent(tableName, result);
+ if (old != null) {
+ return old;
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Check the region cache to see whether a region is cached yet or not.
+ * @param tableName tableName
+ * @param row row
+ * @return Region cached or not.
+ */
+ public boolean isRegionCached(TableName tableName, final byte[] row) {
+ RegionLocations location = getCachedLocation(tableName, row);
+ return location != null;
+ }
+
+ /**
+ * Return the number of cached region for a table. It will only be called
+ * from a unit test.
+ */
+ public int getNumberOfCachedRegionLocations(final TableName tableName) {
+ Map<byte[], RegionLocations> tableLocs = this.cachedRegionLocations.get(tableName);
+ if (tableLocs == null) {
+ return 0;
+ }
+ int numRegions = 0;
+ for (RegionLocations tableLoc : tableLocs.values()) {
+ numRegions += tableLoc.numNonNullElements();
+ }
+ return numRegions;
+ }
+
+ /**
+ * Delete all cached entries.
+ */
+ public void clearCache() {
+ this.cachedRegionLocations.clear();
+ this.cachedServers.clear();
+ }
+
+ /**
+ * Delete all cached entries of a server.
+ */
+ public void clearCache(final ServerName serverName) {
+ if (!this.cachedServers.contains(serverName)) {
+ return;
+ }
+
+ boolean deletedSomething = false;
+ synchronized (this.cachedServers) {
+ // We block here, because if there is an error on a server, it's likely that multiple
+ // threads will get the error simultaneously. If there are hundreds of thousands of
+ // region locations to check, it's better to do this only once. A better pattern would
+ // be to check if the server is dead when we get the region location.
+ if (!this.cachedServers.contains(serverName)) {
+ return;
+ }
+ for (ConcurrentMap<byte[], RegionLocations> tableLocations : cachedRegionLocations.values()) {
+ for (Entry<byte[], RegionLocations> e : tableLocations.entrySet()) {
+ RegionLocations regionLocations = e.getValue();
+ if (regionLocations != null) {
+ RegionLocations updatedLocations = regionLocations.removeByServer(serverName);
+ deletedSomething |= regionLocations != updatedLocations;
+ if (updatedLocations != regionLocations) {
+ if (updatedLocations.isEmpty()) {
+ tableLocations.remove(e.getKey(), regionLocations);
+ } else {
+ tableLocations.replace(e.getKey(), regionLocations, updatedLocations);
+ }
+ }
+ }
+ }
+ }
+ this.cachedServers.remove(serverName);
+ }
+ if (deletedSomething && LOG.isDebugEnabled()) {
+ LOG.debug("Removed all cached region locations that map to " + serverName);
+ }
+ }
+
+ /**
+ * Delete all cached entries of a table.
+ */
+ public void clearCache(final TableName tableName) {
+ this.cachedRegionLocations.remove(tableName);
+ }
+
+ /**
+ * Delete a cached location, no matter what it is. Called when we were told to not use cache.
+ * @param tableName tableName
+ * @param row
+ */
+ public void clearCache(final TableName tableName, final byte [] row) {
+ ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
+
+ RegionLocations regionLocations = getCachedLocation(tableName, row);
+ if (regionLocations != null) {
+ byte[] startKey = regionLocations.getRegionLocation().getRegionInfo().getStartKey();
+ boolean removed = tableLocations.remove(startKey, regionLocations);
+ if (removed && LOG.isDebugEnabled()) {
+ LOG.debug("Removed " + regionLocations + " from cache");
+ }
+ }
+ }
+
+ /**
+ * Delete a cached location for a table, row and server
+ */
+ public void clearCache(final TableName tableName, final byte [] row, ServerName serverName) {
+ ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
+
+ RegionLocations regionLocations = getCachedLocation(tableName, row);
+ if (regionLocations != null) {
+ RegionLocations updatedLocations = regionLocations.removeByServer(serverName);
+ if (updatedLocations != regionLocations) {
+ byte[] startKey = regionLocations.getRegionLocation().getRegionInfo().getStartKey();
+ if (updatedLocations.isEmpty()) {
+ tableLocations.remove(startKey, regionLocations);
+ } else {
+ tableLocations.replace(startKey, regionLocations, updatedLocations);
+ }
+ }
+ }
+ }
+
+ /**
+ * Deletes the cached location of the region if necessary, based on some error from source.
+ * @param hri The region in question.
+ */
+ public void clearCache(HRegionInfo hri) {
+ ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(hri.getTable());
+ RegionLocations regionLocations = tableLocations.get(hri.getStartKey());
+ if (regionLocations != null) {
+ HRegionLocation oldLocation = regionLocations.getRegionLocation(hri.getReplicaId());
+ RegionLocations updatedLocations = regionLocations.remove(oldLocation);
+ if (updatedLocations != regionLocations) {
+ if (updatedLocations.isEmpty()) {
+ tableLocations.remove(hri.getStartKey(), regionLocations);
+ } else {
+ tableLocations.replace(hri.getStartKey(), regionLocations, updatedLocations);
+ }
+ }
+ }
+ }
+
+ public void clearCache(final HRegionLocation location) {
+ if (location == null) {
+ return;
+ }
+
+ TableName tableName = location.getRegionInfo().getTable();
+ ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
+ RegionLocations rll = tableLocations.get(location.getRegionInfo().getStartKey());
+ if (rll == null) {
+ return;
+ }
+ RegionLocations updatedLocations = rll.remove(location);
+ if (updatedLocations.isEmpty()) {
+ tableLocations.remove(location.getRegionInfo().getStartKey(), rll);
+ }
+ if (LOG.isDebugEnabled() && (rll != updatedLocations)) {
+ LOG.debug("Removed " +
+ location.getRegionInfo().getRegionNameAsString() +
+ " for tableName=" + tableName +
+ " from cache");
+ }
+ }
+
+ public void setRegionCachePrefetch(final TableName tableName, final boolean enable) {
+ if (!enable) {
+ regionCachePrefetchDisabledTables.add(Bytes.mapKey(tableName.getName()));
+ } else {
+ regionCachePrefetchDisabledTables.remove(Bytes.mapKey(tableName.getName()));
+ }
+ }
+
+ public boolean getRegionCachePrefetch(TableName tableName) {
+ return !regionCachePrefetchDisabledTables.contains(Bytes.mapKey(tableName.getName()));
+ }
+
+}
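The cache-eviction code above relies on two properties worth calling out. RegionLocations
is copy-on-write: removeByServer() and remove() hand back the receiver itself when nothing
matched and a fresh instance otherwise, so a plain identity comparison detects a change.
And the two-arg ConcurrentMap.remove(key, expected) / three-arg replace(key, expected,
updated) forms only act while the map still holds the expected value, so a concurrent
relocation wins over a stale eviction. A minimal sketch of the resulting pattern, assuming
the surrounding fields and arguments of the class above (illustrative, not part of the patch):

    ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);
    RegionLocations current = tableLocations.get(startKey);
    if (current != null) {
      RegionLocations updated = current.removeByServer(serverName);
      if (updated != current) {                  // identity change: something was evicted
        if (updated.isEmpty()) {
          tableLocations.remove(startKey, current);           // no-op if concurrently updated
        } else {
          tableLocations.replace(startKey, current, updated); // CAS-style swap
        }
      }
    }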
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
index bee2722..06d54bb 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
@@ -30,11 +30,14 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.util.Bytes;
/**
@@ -49,6 +52,7 @@ import org.apache.hadoop.hbase.util.Bytes;
* see HBASE-5986, and {@link DefaultMetaScannerVisitor} for details.
*/
@InterfaceAudience.Private
+// TODO: merge this into MetaReader and get rid of it.
public class MetaScanner {
private static final Log LOG = LogFactory.getLog(MetaScanner.class);
/**
@@ -216,14 +220,14 @@ public class MetaScanner {
* table Result.
* @param data a Result object from the catalog table scan
* @return HRegionInfo or null
+ * @deprecated Use {@link MetaReader#getRegionLocations(Result)}
*/
+ @Deprecated
public static HRegionInfo getHRegionInfo(Result data) {
return HRegionInfo.getHRegionInfo(data);
}
/**
- * Used in tests.
- *
* Lists all of the regions currently in META.
* @param conf
* @param offlined True if we are to include offlined regions, false and we'll
@@ -234,22 +238,23 @@ public class MetaScanner {
public static List<HRegionInfo> listAllRegions(Configuration conf, final boolean offlined)
throws IOException {
final List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
- MetaScannerVisitor visitor = new DefaultMetaScannerVisitor() {
+ MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
@Override
- public boolean processRowInternal(Result result) throws IOException {
+ public boolean processRow(Result result) throws IOException {
if (result == null || result.isEmpty()) {
return true;
}
- HRegionInfo regionInfo = getHRegionInfo(result);
- if (regionInfo == null) {
- LOG.warn("Null REGIONINFO_QUALIFIER: " + result);
- return true;
+ RegionLocations locations = MetaReader.getRegionLocations(result);
+ if (locations == null) return true;
+ for (HRegionLocation loc : locations.getRegionLocations()) {
+ if (loc != null) {
+ HRegionInfo regionInfo = loc.getRegionInfo();
+ // If the region is offline AND we are not including offlined regions, skip it.
+ if (regionInfo.isOffline() && !offlined) continue;
+ regions.add(regionInfo);
+ }
}
-
- // If region offline AND we are not to include offlined regions, return.
- if (regionInfo.isOffline() && !offlined) return true;
- regions.add(regionInfo);
return true;
}
};
@@ -272,10 +277,34 @@ public class MetaScanner {
new TreeMap<HRegionInfo, ServerName>();
MetaScannerVisitor visitor = new TableMetaScannerVisitor(tableName) {
@Override
- public boolean processRowInternal(Result rowResult) throws IOException {
- HRegionInfo info = getHRegionInfo(rowResult);
- ServerName serverName = HRegionInfo.getServerName(rowResult);
- regions.put(new UnmodifyableHRegionInfo(info), serverName);
+ public boolean processRowInternal(Result result) throws IOException {
+ RegionLocations locations = MetaReader.getRegionLocations(result);
+ if (locations == null) return true;
+ for (HRegionLocation loc : locations.getRegionLocations()) {
+ if (loc != null) {
+ HRegionInfo regionInfo = loc.getRegionInfo();
+ regions.put(new UnmodifyableHRegionInfo(regionInfo), loc.getServerName());
+ }
+ }
+ return true;
+ }
+ };
+ metaScan(conf, connection, visitor, tableName);
+ return regions;
+ }
+
+ /**
+ * Lists table regions and locations grouped by region range from META.
+ */
+ public static List<RegionLocations> listTableRegionLocations(Configuration conf,
+ HConnection connection, final TableName tableName) throws IOException {
+ final List<RegionLocations> regions = new ArrayList<RegionLocations>();
+ MetaScannerVisitor visitor = new TableMetaScannerVisitor(tableName) {
+ @Override
+ public boolean processRowInternal(Result result) throws IOException {
+ RegionLocations locations = MetaReader.getRegionLocations(result);
+ if (locations == null) return true;
+ regions.add(locations);
return true;
}
};
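As a usage sketch of the new listTableRegionLocations() (hypothetical table name, imports
elided; assumes a running cluster and the era-appropriate
HConnectionManager.createConnection() factory):

    Configuration conf = HBaseConfiguration.create();
    HConnection connection = HConnectionManager.createConnection(conf);
    try {
      List<RegionLocations> tableLocations =
          MetaScanner.listTableRegionLocations(conf, connection, TableName.valueOf("t1"));
      for (RegionLocations rl : tableLocations) {
        for (HRegionLocation loc : rl.getRegionLocations()) {
          if (loc == null) continue;  // replica slots with no known location are null
          System.out.println(loc.getRegionInfo().getRegionNameAsString()
              + " replicaId=" + loc.getRegionInfo().getReplicaId()
              + " on " + loc.getServerName());
        }
      }
    } finally {
      connection.close();
    }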
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
new file mode 100644
index 0000000..abe9bf5
--- /dev/null
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HRegionInfo;
+
+/**
+ * Utility methods which contain the logic for regions and replicas.
+ */
+@InterfaceAudience.Private
+public class RegionReplicaUtil {
+
+ /**
+ * The default replicaId for the region
+ */
+ private static final int DEFAULT_REPLICA_ID = 0;
+
+ /**
+ * Returns the HRegionInfo for the given replicaId. HRegionInfos correspond to
+ * a range of a table, but more than one "instance" of the same range can be
+ * deployed; the instances are differentiated by the replicaId.
+ * @param regionInfo the region for which the replica is wanted
+ * @param replicaId the replicaId to use
+ * @return an HRegionInfo object corresponding to the same range (table, start and
+ * end key), but for the given replicaId.
+ */
+ public static HRegionInfo getRegionInfoForReplica(HRegionInfo regionInfo, int replicaId) {
+ if (regionInfo.getReplicaId() == replicaId) {
+ return regionInfo;
+ }
+ HRegionInfo replicaInfo = new HRegionInfo(regionInfo.getTable(), regionInfo.getStartKey(),
+ regionInfo.getEndKey(), regionInfo.isSplit(), regionInfo.getRegionId(), replicaId);
+
+ replicaInfo.setOffline(regionInfo.isOffline());
+ return replicaInfo;
+ }
+
+ /**
+ * Returns the HRegionInfo for the default replicaId (0). HRegionInfos correspond to
+ * a range of a table, but more than one "instance" of the same range can be
+ * deployed; the instances are differentiated by the replicaId.
+ * @param regionInfo the region for which the default replica is wanted
+ * @return an HRegionInfo object corresponding to the same range (table, start and
+ * end key), but for the default replicaId.
+ */
+ public static HRegionInfo getRegionInfoForDefaultReplica(HRegionInfo regionInfo) {
+ return getRegionInfoForReplica(regionInfo, DEFAULT_REPLICA_ID);
+ }
+
+}
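A short usage sketch for RegionReplicaUtil; the six-argument HRegionInfo constructor is
the one exercised by the tests elsewhere in this patch, and "t1" is a hypothetical table:

    HRegionInfo primary = new HRegionInfo(TableName.valueOf("t1"),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false,
        System.currentTimeMillis(), 0);
    // Same table, start and end key, but replicaId 1.
    HRegionInfo replica1 = RegionReplicaUtil.getRegionInfoForReplica(primary, 1);
    // Any replica maps back to the replicaId-0 primary.
    HRegionInfo backToPrimary = RegionReplicaUtil.getRegionInfoForDefaultReplica(replica1);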
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index 74c1e4a..ea33963 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -73,6 +73,7 @@ public abstract class RegionServerCallable<T> implements RetryingCallable<T> {
* @param reload Set this to true if connection should re-find the region
* @throws IOException e
*/
+ @Override
public void prepare(final boolean reload) throws IOException {
this.location = connection.getRegionLocation(tableName, row, reload);
if (this.location == null) {
@@ -124,7 +125,7 @@ public abstract class RegionServerCallable implements RetryingCallable {
// hbase:meta again to find the new location
if (this.location != null) getConnection().clearCaches(location.getServerName());
} else if (t instanceof RegionMovedException) {
- getConnection().updateCachedLocations(tableName, row, t, location.getServerName());
+ getConnection().updateCachedLocations(tableName, row, t, location);
} else if (t instanceof NotServingRegionException && !retrying) {
// Purge cache entries for this specific region from hbase:meta cache
// since we don't call connect(true) when number of retries is 1.
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
index 2890cf4..aab547e 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
+import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HRegionLocation;
/**
* Cluster registry.
@@ -36,7 +36,7 @@ interface Registry {
* @return Meta region location
* @throws IOException
*/
- HRegionLocation getMetaRegionLocation() throws IOException;
+ RegionLocations getMetaRegionLocation() throws IOException;
/**
* @return Cluster id.
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
index 3d765c6..39e4992 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
@@ -21,10 +21,11 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKTableReadOnly;
@@ -48,7 +49,7 @@ class ZooKeeperRegistry implements Registry {
}
@Override
- public HRegionLocation getMetaRegionLocation() throws IOException {
+ public RegionLocations getMetaRegionLocation() throws IOException {
ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher();
try {
@@ -61,7 +62,8 @@ class ZooKeeperRegistry implements Registry {
"; serverName=" + ((servername == null) ? "null" : servername));
}
if (servername == null) return null;
- return new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, servername, 0);
+ HRegionLocation loc = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, servername, 0);
+ return new RegionLocations(new HRegionLocation[] {loc});
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return null;
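Callers of Registry.getMetaRegionLocation() that used to receive a bare HRegionLocation
now unwrap the primary from the RegionLocations container, roughly like this (a sketch;
getRegionLocation() with no argument returns the first non-null location):

    RegionLocations metaLocations = registry.getMetaRegionLocation();
    HRegionLocation primaryMeta =
        (metaLocations == null) ? null : metaLocations.getRegionLocation();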
diff --git hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java
new file mode 100644
index 0000000..603f8d5
--- /dev/null
+++ hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java
@@ -0,0 +1,275 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import org.junit.Test;
+
+public class TestRegionLocations {
+
+ ServerName sn0 = ServerName.valueOf("host0", 10, 10);
+ ServerName sn1 = ServerName.valueOf("host1", 10, 10);
+ ServerName sn2 = ServerName.valueOf("host2", 10, 10);
+ ServerName sn3 = ServerName.valueOf("host3", 10, 10);
+
+ HRegionInfo info0 = hri(0);
+ HRegionInfo info1 = hri(1);
+ HRegionInfo info2 = hri(2);
+ HRegionInfo info9 = hri(9);
+
+ @Test
+ public void testSizeMethods() {
+ RegionLocations list = new RegionLocations();
+ assertTrue(list.isEmpty());
+ assertEquals(0, list.size());
+ assertEquals(0, list.numNonNullElements());
+
+ list = hrll((HRegionLocation)null);
+ assertTrue(list.isEmpty());
+ assertEquals(0, list.size());
+ assertEquals(0, list.numNonNullElements());
+
+ HRegionInfo info0 = hri(0);
+ list = hrll(hrl(info0, null));
+ assertFalse(list.isEmpty());
+ assertEquals(1, list.size());
+ assertEquals(1, list.numNonNullElements());
+
+ HRegionInfo info9 = hri(9);
+ list = hrll(hrl(info9, null));
+ assertFalse(list.isEmpty());
+ assertEquals(10, list.size());
+ assertEquals(1, list.numNonNullElements());
+
+ list = hrll(hrl(info0, null), hrl(info9, null));
+ assertFalse(list.isEmpty());
+ assertEquals(10, list.size());
+ assertEquals(2, list.numNonNullElements());
+ }
+
+ private HRegionInfo hri(int replicaId) {
+ TableName table = TableName.valueOf("table");
+ byte[] startKey = HConstants.EMPTY_START_ROW;
+ byte[] endKey = HConstants.EMPTY_END_ROW;
+ long regionId = System.currentTimeMillis();
+ HRegionInfo info = new HRegionInfo(table, startKey, endKey, false, regionId, replicaId);
+ return info;
+ }
+
+ private HRegionLocation hrl(HRegionInfo hri, ServerName sn) {
+ return new HRegionLocation(hri, sn);
+ }
+
+ private HRegionLocation hrl(HRegionInfo hri, ServerName sn, long seqNum) {
+ return new HRegionLocation(hri, sn, seqNum);
+ }
+
+ private RegionLocations hrll(HRegionLocation ... locations) {
+ return new RegionLocations(locations);
+ }
+
+ @Test
+ public void testRemoveByServer() {
+ RegionLocations list;
+
+ // test remove from empty list
+ list = new RegionLocations();
+ assertTrue(list == list.removeByServer(sn0));
+
+ // test remove from single element list
+ list = hrll(hrl(info0, sn0));
+ assertTrue(list == list.removeByServer(sn1));
+ list = list.removeByServer(sn0);
+ assertTrue(list.isEmpty());
+
+ // test remove from multi element list
+ list = hrll(hrl(info0, sn0), hrl(info1, sn1), hrl(info2, sn2), hrl(info9, sn2));
+ assertTrue(list == list.removeByServer(sn3)); // no region is mapped to sn3
+ list = list.removeByServer(sn0);
+ assertNull(list.getRegionLocation(0));
+ assertEquals(sn1, list.getRegionLocation(1).getServerName());
+ assertEquals(sn2, list.getRegionLocation(2).getServerName());
+ assertNull(list.getRegionLocation(5));
+ assertEquals(sn2, list.getRegionLocation(9).getServerName());
+
+ // test multi-element remove from multi element list
+ list = hrll(hrl(info0, sn1), hrl(info1, sn1), hrl(info2, sn0), hrl(info9, sn0));
+ list = list.removeByServer(sn0);
+ assertEquals(sn1, list.getRegionLocation(0).getServerName());
+ assertEquals(sn1, list.getRegionLocation(1).getServerName());
+ assertNull(list.getRegionLocation(2));
+ assertNull(list.getRegionLocation(5));
+ assertNull(list.getRegionLocation(9));
+ }
+
+ @Test
+ public void testRemove() {
+ RegionLocations list;
+
+ // test remove from empty list
+ list = new RegionLocations();
+ assertTrue(list == list.remove(hrl(info0, sn0)));
+
+ // test remove from single element list
+ list = hrll(hrl(info0, sn0));
+ assertTrue(list == list.remove(hrl(info0, sn1)));
+ list = list.remove(hrl(info0, sn0));
+ assertTrue(list.isEmpty());
+
+ // test remove from multi element list
+ list = hrll(hrl(info0, sn0), hrl(info1, sn1), hrl(info2, sn2), hrl(info9, sn2));
+ assertTrue(list == list.remove(hrl(info1, sn3))); // no region is mapped to sn3
+ list = list.remove(hrl(info0, sn0));
+ assertNull(list.getRegionLocation(0));
+ assertEquals(sn1, list.getRegionLocation(1).getServerName());
+ assertEquals(sn2, list.getRegionLocation(2).getServerName());
+ assertNull(list.getRegionLocation(5));
+ assertEquals(sn2, list.getRegionLocation(9).getServerName());
+
+ list = list.remove(hrl(info9, sn2));
+ assertNull(list.getRegionLocation(0));
+ assertEquals(sn1, list.getRegionLocation(1).getServerName());
+ assertEquals(sn2, list.getRegionLocation(2).getServerName());
+ assertNull(list.getRegionLocation(5));
+ assertNull(list.getRegionLocation(9));
+
+ // test multi-element remove from multi element list
+ list = hrll(hrl(info0, sn1), hrl(info1, sn1), hrl(info2, sn0), hrl(info9, sn0));
+ list = list.remove(hrl(info9, sn0));
+ assertEquals(sn1, list.getRegionLocation(0).getServerName());
+ assertEquals(sn1, list.getRegionLocation(1).getServerName());
+ assertEquals(sn0, list.getRegionLocation(2).getServerName());
+ assertNull(list.getRegionLocation(5));
+ assertNull(list.getRegionLocation(9));
+ }
+
+ @Test
+ public void testUpdateLocation() {
+ RegionLocations list;
+
+ // test add to empty list
+ list = new RegionLocations();
+ list = list.updateLocation(hrl(info0, sn1), false, false);
+ assertEquals(sn1, list.getRegionLocation(0).getServerName());
+
+ // test add to non-empty list
+ list = list.updateLocation(hrl(info9, sn3, 10), false, false);
+ assertEquals(sn3, list.getRegionLocation(9).getServerName());
+ assertEquals(10, list.size());
+ list = list.updateLocation(hrl(info2, sn2, 10), false, false);
+ assertEquals(sn2, list.getRegionLocation(2).getServerName());
+ assertEquals(10, list.size());
+
+ // test update greater SeqNum
+ list = list.updateLocation(hrl(info2, sn3, 11), false, false);
+ assertEquals(sn3, list.getRegionLocation(2).getServerName());
+ assertEquals(sn3, list.getRegionLocation(9).getServerName());
+
+ // test update equal SeqNum
+ list = list.updateLocation(hrl(info2, sn1, 11), false, false); // should not update
+ assertEquals(sn3, list.getRegionLocation(2).getServerName());
+ assertEquals(sn3, list.getRegionLocation(9).getServerName());
+ list = list.updateLocation(hrl(info2, sn1, 11), true, false); // should update
+ assertEquals(sn1, list.getRegionLocation(2).getServerName());
+ assertEquals(sn3, list.getRegionLocation(9).getServerName());
+
+ // test force update
+ list = list.updateLocation(hrl(info2, sn2, 9), false, true); // should update
+ assertEquals(sn2, list.getRegionLocation(2).getServerName());
+ assertEquals(sn3, list.getRegionLocation(9).getServerName());
+ }
+
+ @Test
+ public void testMergeLocations() {
+ RegionLocations list1, list2;
+
+ // test merge empty lists
+ list1 = new RegionLocations();
+ list2 = new RegionLocations();
+
+ assertTrue(list1 == list1.mergeLocations(list2));
+
+ // test merge non-empty and empty
+ list2 = hrll(hrl(info0, sn0));
+ list1 = list1.mergeLocations(list2);
+ assertEquals(sn0, list1.getRegionLocation(0).getServerName());
+
+ // test merge empty and non empty
+ list1 = hrll();
+ list1 = list2.mergeLocations(list1);
+ assertEquals(sn0, list1.getRegionLocation(0).getServerName());
+
+ // test merge non intersecting
+ list1 = hrll(hrl(info0, sn0), hrl(info1, sn1));
+ list2 = hrll(hrl(info2, sn2));
+ list1 = list2.mergeLocations(list1);
+ assertEquals(sn0, list1.getRegionLocation(0).getServerName());
+ assertEquals(sn1, list1.getRegionLocation(1).getServerName());
+ assertEquals(sn2, list1.getRegionLocation(2).getServerName());
+
+ // do the other way merge as well
+ list1 = hrll(hrl(info0, sn0), hrl(info1, sn1));
+ list2 = hrll(hrl(info2, sn2));
+ list1 = list1.mergeLocations(list2);
+ assertEquals(sn0, list1.getRegionLocation(0).getServerName());
+ assertEquals(sn1, list1.getRegionLocation(1).getServerName());
+ assertEquals(sn2, list1.getRegionLocation(2).getServerName());
+
+ // test intersecting lists same seqNum
+ list1 = hrll(hrl(info0, sn0), hrl(info1, sn1));
+ list2 = hrll(hrl(info0, sn2), hrl(info1, sn2), hrl(info9, sn3));
+ list1 = list2.mergeLocations(list1); // list1 should override
+ assertEquals(10, list1.size());
+ assertEquals(sn0, list1.getRegionLocation(0).getServerName());
+ assertEquals(sn1, list1.getRegionLocation(1).getServerName());
+ assertEquals(sn3, list1.getRegionLocation(9).getServerName());
+
+ // do the other way
+ list1 = hrll(hrl(info0, sn0), hrl(info1, sn1));
+ list2 = hrll(hrl(info0, sn2), hrl(info1, sn2), hrl(info9, sn3));
+ list1 = list1.mergeLocations(list2); // list2 should override
+ assertEquals(10, list1.size());
+ assertEquals(sn2, list1.getRegionLocation(0).getServerName());
+ assertEquals(sn2, list1.getRegionLocation(1).getServerName());
+ assertEquals(sn3, list1.getRegionLocation(9).getServerName());
+
+ // test intersecting lists different seqNum
+ list1 = hrll(hrl(info0, sn0, 10), hrl(info1, sn1, 10));
+ list2 = hrll(hrl(info0, sn2, 11), hrl(info1, sn2, 11), hrl(info9, sn3, 11));
+ list1 = list1.mergeLocations(list2); // list2 should override because of seqNum
+ assertEquals(10, list1.size());
+ assertEquals(sn2, list1.getRegionLocation(0).getServerName());
+ assertEquals(sn2, list1.getRegionLocation(1).getServerName());
+ assertEquals(sn3, list1.getRegionLocation(9).getServerName());
+
+ // do the other way
+ list1 = hrll(hrl(info0, sn0, 10), hrl(info1, sn1, 10));
+ list2 = hrll(hrl(info0, sn2, 11), hrl(info1, sn2, 11), hrl(info9, sn3, 11));
+ list1 = list2.mergeLocations(list1); // list2 should still override because of seqNum
+ assertEquals(10, list1.size());
+ assertEquals(sn2, list1.getRegionLocation(0).getServerName());
+ assertEquals(sn2, list1.getRegionLocation(1).getServerName());
+ assertEquals(sn3, list1.getRegionLocation(9).getServerName());
+ }
+}
diff --git hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index a26ac15..5c972b7 100644
--- hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.ServerName;
@@ -117,8 +118,9 @@ public class TestClientNoCluster extends Configured implements Tool {
}
@Override
- public HRegionLocation getMetaRegionLocation() throws IOException {
- return new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, META_HOST);
+ public RegionLocations getMetaRegionLocation() throws IOException {
+ return new RegionLocations(
+ new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, META_HOST));
}
@Override
@@ -142,7 +144,7 @@ public class TestClientNoCluster extends Configured implements Tool {
* Remove the @Ignore to try out timeout and retry settings
* @throws IOException
*/
- @Ignore
+ @Ignore
@Test
public void testTimeoutAndRetries() throws IOException {
Configuration localConfig = HBaseConfiguration.create(this.conf);
@@ -759,7 +761,7 @@ public class TestClientNoCluster extends Configured implements Tool {
// an exception is thrown -- usually RegionTooBusyException when we have more than
// hbase.test.multi.too.many requests outstanding at any time.
getConf().setInt("hbase.client.start.log.errors.counter", 0);
-
+
// Ugly but this is the only way to pass configs into the ManyServersManyRegionsConnection class.
getConf().setInt("hbase.test.regions", regions);
getConf().setLong("hbase.test.namespace.span", namespaceSpan);
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 664ec6f..a555a57 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -387,13 +387,19 @@ public final class HConstants {
public static final byte [] REGIONINFO_QUALIFIER = Bytes.toBytes(REGIONINFO_QUALIFIER_STR);
/** The server column qualifier */
- public static final byte [] SERVER_QUALIFIER = Bytes.toBytes("server");
+ public static final String SERVER_QUALIFIER_STR = "server";
+ /** The server column qualifier */
+ public static final byte [] SERVER_QUALIFIER = Bytes.toBytes(SERVER_QUALIFIER_STR);
/** The startcode column qualifier */
- public static final byte [] STARTCODE_QUALIFIER = Bytes.toBytes("serverstartcode");
+ public static final String STARTCODE_QUALIFIER_STR = "serverstartcode";
+ /** The startcode column qualifier */
+ public static final byte [] STARTCODE_QUALIFIER = Bytes.toBytes(STARTCODE_QUALIFIER_STR);
/** The open seqnum column qualifier */
- public static final byte [] SEQNUM_QUALIFIER = Bytes.toBytes("seqnumDuringOpen");
+ public static final String SEQNUM_QUALIFIER_STR = "seqnumDuringOpen";
+ /** The open seqnum column qualifier */
+ public static final byte [] SEQNUM_QUALIFIER = Bytes.toBytes(SEQNUM_QUALIFIER_STR);
/** The lower-half split region column qualifier */
public static final byte [] SPLITA_QUALIFIER = Bytes.toBytes("splitA");
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
index 7acf1d3..81af6dc 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
@@ -3687,6 +3687,16 @@ public final class HBaseProtos {
* optional bool split = 6;
*/
boolean getSplit();
+
+ // optional int32 replica_id = 7 [default = 0];
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ boolean hasReplicaId();
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ int getReplicaId();
}
/**
* Protobuf type {@code RegionInfo}
@@ -3782,6 +3792,11 @@ public final class HBaseProtos {
split_ = input.readBool();
break;
}
+ case 56: {
+ bitField0_ |= 0x00000040;
+ replicaId_ = input.readInt32();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -3924,6 +3939,22 @@ public final class HBaseProtos {
return split_;
}
+ // optional int32 replica_id = 7 [default = 0];
+ public static final int REPLICA_ID_FIELD_NUMBER = 7;
+ private int replicaId_;
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ public boolean hasReplicaId() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ public int getReplicaId() {
+ return replicaId_;
+ }
+
private void initFields() {
regionId_ = 0L;
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
@@ -3931,6 +3962,7 @@ public final class HBaseProtos {
endKey_ = com.google.protobuf.ByteString.EMPTY;
offline_ = false;
split_ = false;
+ replicaId_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -3974,6 +4006,9 @@ public final class HBaseProtos {
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeBool(6, split_);
}
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ output.writeInt32(7, replicaId_);
+ }
getUnknownFields().writeTo(output);
}
@@ -4007,6 +4042,10 @@ public final class HBaseProtos {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(6, split_);
}
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(7, replicaId_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -4060,6 +4099,11 @@ public final class HBaseProtos {
result = result && (getSplit()
== other.getSplit());
}
+ result = result && (hasReplicaId() == other.hasReplicaId());
+ if (hasReplicaId()) {
+ result = result && (getReplicaId()
+ == other.getReplicaId());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -4097,6 +4141,10 @@ public final class HBaseProtos {
hash = (37 * hash) + SPLIT_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getSplit());
}
+ if (hasReplicaId()) {
+ hash = (37 * hash) + REPLICA_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getReplicaId();
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -4228,6 +4276,8 @@ public final class HBaseProtos {
bitField0_ = (bitField0_ & ~0x00000010);
split_ = false;
bitField0_ = (bitField0_ & ~0x00000020);
+ replicaId_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
@@ -4284,6 +4334,10 @@ public final class HBaseProtos {
to_bitField0_ |= 0x00000020;
}
result.split_ = split_;
+ if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+ to_bitField0_ |= 0x00000040;
+ }
+ result.replicaId_ = replicaId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -4318,6 +4372,9 @@ public final class HBaseProtos {
if (other.hasSplit()) {
setSplit(other.getSplit());
}
+ if (other.hasReplicaId()) {
+ setReplicaId(other.getReplicaId());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -4645,6 +4702,39 @@ public final class HBaseProtos {
return this;
}
+ // optional int32 replica_id = 7 [default = 0];
+ private int replicaId_ ;
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ public boolean hasReplicaId() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ public int getReplicaId() {
+ return replicaId_;
+ }
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ public Builder setReplicaId(int value) {
+ bitField0_ |= 0x00000040;
+ replicaId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ public Builder clearReplicaId() {
+ bitField0_ = (bitField0_ & ~0x00000040);
+ replicaId_ = 0;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:RegionInfo)
}
@@ -15778,41 +15868,42 @@ public final class HBaseProtos {
"ingPair\"o\n\022ColumnFamilySchema\022\014\n\004name\030\001 " +
"\002(\014\022#\n\nattributes\030\002 \003(\0132\017.BytesBytesPair" +
"\022&\n\rconfiguration\030\003 \003(\0132\017.NameStringPair" +
- "\"\203\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002(\004\022\036\n\nta",
+ "\"\232\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002(\004\022\036\n\nta",
"ble_name\030\002 \002(\0132\n.TableName\022\021\n\tstart_key\030" +
"\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022" +
- "\r\n\005split\030\006 \001(\010\"1\n\014FavoredNodes\022!\n\014favore" +
- "d_node\030\001 \003(\0132\013.ServerName\"\225\001\n\017RegionSpec" +
- "ifier\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.Re" +
- "gionSpecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023Regi" +
- "onSpecifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCO" +
- "DED_REGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004from\030\001" +
- " \001(\004\022\n\n\002to\030\002 \001(\004\"A\n\nServerName\022\021\n\thost_n" +
- "ame\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 ",
- "\001(\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016Name" +
- "StringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\"" +
- ",\n\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030" +
- "\002 \001(\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016" +
- "\n\006second\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030" +
- "\001 \001(\t\022\r\n\005value\030\002 \001(\003\"\256\001\n\023SnapshotDescrip" +
- "tion\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcre" +
- "ation_time\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031.Snap" +
- "shotDescription.Type:\005FLUSH\022\017\n\007version\030\005" +
- " \001(\005\"\037\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\"}\n",
- "\024ProcedureDescription\022\021\n\tsignature\030\001 \002(\t" +
- "\022\020\n\010instance\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(" +
- "\003:\0010\022&\n\rconfiguration\030\004 \003(\0132\017.NameString" +
- "Pair\"\n\n\010EmptyMsg\"\033\n\007LongMsg\022\020\n\010long_msg\030" +
- "\001 \002(\003\"\'\n\rBigDecimalMsg\022\026\n\016bigdecimal_msg" +
- "\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig_bits\030\001 \002(\004\022\025" +
- "\n\rmost_sig_bits\030\002 \002(\004\"K\n\023NamespaceDescri" +
- "ptor\022\014\n\004name\030\001 \002(\014\022&\n\rconfiguration\030\002 \003(" +
- "\0132\017.NameStringPair\"$\n\020RegionServerInfo\022\020" +
- "\n\010infoPort\030\001 \001(\005*r\n\013CompareType\022\010\n\004LESS\020",
- "\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_E" +
- "QUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020" +
- "\005\022\t\n\005NO_OP\020\006B>\n*org.apache.hadoop.hbase." +
- "protobuf.generatedB\013HBaseProtosH\001\240\001\001"
+ "\r\n\005split\030\006 \001(\010\022\025\n\nreplica_id\030\007 \001(\005:\0010\"1\n" +
+ "\014FavoredNodes\022!\n\014favored_node\030\001 \003(\0132\013.Se" +
+ "rverName\"\225\001\n\017RegionSpecifier\0222\n\004type\030\001 \002" +
+ "(\0162$.RegionSpecifier.RegionSpecifierType" +
+ "\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpecifierType\022\017" +
+ "\n\013REGION_NAME\020\001\022\027\n\023ENCODED_REGION_NAME\020\002" +
+ "\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"" +
+ "A\n\nServerName\022\021\n\thost_name\030\001 \002(\t\022\014\n\004port",
+ "\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033\n\013Coprocesso" +
+ "r\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n\004nam" +
+ "e\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesPair\022" +
+ "\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016BytesByt" +
+ "esPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(\014\",\n" +
+ "\rNameInt64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 " +
+ "\001(\003\"\256\001\n\023SnapshotDescription\022\014\n\004name\030\001 \002(" +
+ "\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003:" +
+ "\0010\022.\n\004type\030\004 \001(\0162\031.SnapshotDescription.T" +
+ "ype:\005FLUSH\022\017\n\007version\030\005 \001(\005\"\037\n\004Type\022\014\n\010D",
+ "ISABLED\020\000\022\t\n\005FLUSH\020\001\"}\n\024ProcedureDescrip" +
+ "tion\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 \001(" +
+ "\t\022\030\n\rcreation_time\030\003 \001(\003:\0010\022&\n\rconfigura" +
+ "tion\030\004 \003(\0132\017.NameStringPair\"\n\n\010EmptyMsg\"" +
+ "\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\'\n\rBigDecim" +
+ "alMsg\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n" +
+ "\016least_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002" +
+ " \002(\004\"K\n\023NamespaceDescriptor\022\014\n\004name\030\001 \002(" +
+ "\014\022&\n\rconfiguration\030\002 \003(\0132\017.NameStringPai" +
+ "r\"$\n\020RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005*",
+ "r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUA" +
+ "L\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER" +
+ "_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006B>\n*o" +
+ "rg.apache.hadoop.hbase.protobuf.generate" +
+ "dB\013HBaseProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -15842,7 +15933,7 @@ public final class HBaseProtos {
internal_static_RegionInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionInfo_descriptor,
- new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", });
+ new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", "ReplicaId", });
internal_static_FavoredNodes_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_FavoredNodes_fieldAccessorTable = new
diff --git hbase-protocol/src/main/protobuf/HBase.proto hbase-protocol/src/main/protobuf/HBase.proto
index a966c40..4874fc2 100644
--- hbase-protocol/src/main/protobuf/HBase.proto
+++ hbase-protocol/src/main/protobuf/HBase.proto
@@ -64,6 +64,7 @@ message RegionInfo {
optional bytes end_key = 4;
optional bool offline = 5;
optional bool split = 6;
+ optional int32 replica_id = 7 [default = 0];
}
/**
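For illustration, the new field round-trips through the regenerated Java API as below
(hypothetical values; imports of com.google.protobuf.ByteString and the generated
HBaseProtos are assumed). Because replica_id is optional with default 0, a message
written without the field reads back as the primary:

    HBaseProtos.TableName tableName = HBaseProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("t1"))
        .build();
    HBaseProtos.RegionInfo regionInfo = HBaseProtos.RegionInfo.newBuilder()
        .setRegionId(1L)
        .setTableName(tableName)
        .setReplicaId(2)   // serialized as field 7 only when explicitly set
        .build();
    assert regionInfo.hasReplicaId() && regionInfo.getReplicaId() == 2;
    assert HBaseProtos.RegionInfo.newBuilder()
        .setRegionId(1L).setTableName(tableName).build()
        .getReplicaId() == 0;   // unset -> default replica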
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
index 89fa0a9..5b5446b 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.catalog;
import java.io.IOException;
import java.io.InterruptedIOException;
-import java.net.ConnectException;
import java.util.ArrayList;
import java.util.List;
@@ -47,11 +46,10 @@ import com.google.protobuf.ServiceException;
/**
* Writes region and assignment information to hbase:meta.
- * TODO: Put MetaReader and MetaEditor together; doesn't make sense having
- * them distinct. see HBASE-3475.
*/
@InterfaceAudience.Private
-public class MetaEditor {
+public class MetaEditor extends MetaReader {
+
// TODO: Strip CatalogTracker from this class. Its all over and in the end
// its only used to get its Configuration so we can get associated
// Connection.
@@ -290,7 +288,7 @@ public class MetaEditor {
Put put = new Put(regionInfo.getRegionName());
addRegionInfo(put, regionInfo);
if (sn != null) {
- addLocation(put, sn, openSeqNum);
+ addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
}
putToMetaTable(catalogTracker, put);
LOG.info("Added daughter " + regionInfo.getEncodedName() +
@@ -327,7 +325,7 @@ public class MetaEditor {
Delete deleteB = makeDeleteFromRegionInfo(regionB);
// The merged is a new region, openSeqNum = 1 is fine.
- addLocation(putOfMerged, sn, 1);
+ addLocation(putOfMerged, sn, 1, mergedRegion.getReplicaId());
byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
+ HConstants.DELIMITER);
@@ -365,8 +363,8 @@ public class MetaEditor {
Put putA = makePutFromRegionInfo(splitA);
Put putB = makePutFromRegionInfo(splitB);
- addLocation(putA, sn, 1); //these are new regions, openSeqNum = 1 is fine.
- addLocation(putB, sn, 1);
+ addLocation(putA, sn, 1, splitA.getReplicaId()); //new regions, openSeqNum = 1 is fine.
+ addLocation(putB, sn, 1, splitB.getReplicaId());
byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
multiMutate(meta, tableRow, putParent, putA, putB);
@@ -401,29 +399,6 @@ public class MetaEditor {
}
}
-
- /**
- * Updates the location of the specified hbase:meta region in ROOT to be the
- * specified server hostname and startcode.
- *
- * Uses passed catalog tracker to get a connection to the server hosting
- * ROOT and makes edits to that region.
- *
- * @param catalogTracker catalog tracker
- * @param regionInfo region to update location of
- * @param sn Server name
- * @param openSeqNum the latest sequence number obtained when the region was open
- * @throws IOException
- * @throws ConnectException Usually because the regionserver carrying hbase:meta
- * is down.
- * @throws NullPointerException Because no -ROOT- server connection
- */
- public static void updateMetaLocation(CatalogTracker catalogTracker,
- HRegionInfo regionInfo, ServerName sn, long openSeqNum)
- throws IOException, ConnectException {
- updateLocation(catalogTracker, regionInfo, sn, openSeqNum);
- }
-
/**
* Updates the location of the specified region in hbase:meta to be the specified
* server hostname and startcode.
@@ -458,8 +433,9 @@ public class MetaEditor {
private static void updateLocation(final CatalogTracker catalogTracker,
HRegionInfo regionInfo, ServerName sn, long openSeqNum)
throws IOException {
- Put put = new Put(regionInfo.getRegionName());
- addLocation(put, sn, openSeqNum);
+ // region replicas are kept in the primary region's row
+ Put put = new Put(getMetaKeyForRegion(regionInfo));
+ addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
putToCatalogTable(catalogTracker, put);
LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
" with server=" + sn);
@@ -568,12 +544,12 @@ public class MetaEditor {
return p;
}
- private static Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
- p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
+ private static Put addLocation(final Put p, final ServerName sn, long openSeqNum, int replicaId){
+ p.addImmutable(HConstants.CATALOG_FAMILY, MetaReader.getServerColumn(replicaId),
Bytes.toBytes(sn.getHostAndPort()));
- p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
+ p.addImmutable(HConstants.CATALOG_FAMILY, MetaReader.getStartCodeColumn(replicaId),
Bytes.toBytes(sn.getStartcode()));
- p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER,
+ p.addImmutable(HConstants.CATALOG_FAMILY, MetaReader.getSeqNumColumn(replicaId),
Bytes.toBytes(openSeqNum));
return p;
}
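The replica-aware qualifiers that addLocation() now writes keep replica 0 on the bare
"server"/"serverstartcode"/"seqnumDuringOpen" columns and suffix higher replicas with a
delimiter plus a four-digit uppercase-hex replicaId; the tests further down check
"server_002A"-style names for replica 42. A sketch of the scheme, assuming '_' as
META_REPLICA_ID_DELIMITER, and not the actual MetaReader.getServerColumn() implementation:

    static byte[] serverColumn(int replicaId) {
      return replicaId == 0
          ? HConstants.SERVER_QUALIFIER
          : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + "_"
              + String.format("%04X", replicaId));  // 42 -> "server_002A"
    }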
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
index f64a01e..9e39f0c 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
@@ -31,10 +31,6 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
@@ -97,6 +93,7 @@ public class CoprocessorHConnection implements HConnection {
this.delegate = delegate;
}
+ @Override
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface
getClient(ServerName serverName) throws IOException {
// client is trying to reach off-server, so we can't do anything special
@@ -129,260 +126,320 @@ public class CoprocessorHConnection implements HConnection {
return ClientService.newBlockingStub(channel);
}
+ @Override
public void abort(String why, Throwable e) {
delegate.abort(why, e);
}
+ @Override
public boolean isAborted() {
return delegate.isAborted();
}
+ @Override
public Configuration getConfiguration() {
return delegate.getConfiguration();
}
+ @Override
public HTableInterface getTable(String tableName) throws IOException {
return delegate.getTable(tableName);
}
+ @Override
public HTableInterface getTable(byte[] tableName) throws IOException {
return delegate.getTable(tableName);
}
+ @Override
public HTableInterface getTable(TableName tableName) throws IOException {
return delegate.getTable(tableName);
}
+ @Override
public HTableInterface getTable(String tableName, ExecutorService pool) throws IOException {
return delegate.getTable(tableName, pool);
}
+ @Override
public HTableInterface getTable(byte[] tableName, ExecutorService pool) throws IOException {
return delegate.getTable(tableName, pool);
}
+ @Override
public HTableInterface getTable(TableName tableName, ExecutorService pool) throws IOException {
return delegate.getTable(tableName, pool);
}
+ @Override
public boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException {
return delegate.isMasterRunning();
}
+ @Override
public boolean isTableEnabled(TableName tableName) throws IOException {
return delegate.isTableEnabled(tableName);
}
+ @Override
public boolean isTableEnabled(byte[] tableName) throws IOException {
return delegate.isTableEnabled(tableName);
}
+ @Override
public boolean isTableDisabled(TableName tableName) throws IOException {
return delegate.isTableDisabled(tableName);
}
+ @Override
public boolean isTableDisabled(byte[] tableName) throws IOException {
return delegate.isTableDisabled(tableName);
}
+ @Override
public boolean isTableAvailable(TableName tableName) throws IOException {
return delegate.isTableAvailable(tableName);
}
+ @Override
public boolean isTableAvailable(byte[] tableName) throws IOException {
return delegate.isTableAvailable(tableName);
}
+ @Override
public boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException {
return delegate.isTableAvailable(tableName, splitKeys);
}
+ @Override
public boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws IOException {
return delegate.isTableAvailable(tableName, splitKeys);
}
+ @Override
public HTableDescriptor[] listTables() throws IOException {
return delegate.listTables();
}
+ @Override
public String[] getTableNames() throws IOException {
return delegate.getTableNames();
}
+ @Override
public TableName[] listTableNames() throws IOException {
return delegate.listTableNames();
}
+ @Override
public HTableDescriptor getHTableDescriptor(TableName tableName) throws IOException {
return delegate.getHTableDescriptor(tableName);
}
+ @Override
public HTableDescriptor getHTableDescriptor(byte[] tableName) throws IOException {
return delegate.getHTableDescriptor(tableName);
}
+ @Override
public HRegionLocation locateRegion(TableName tableName, byte[] row) throws IOException {
return delegate.locateRegion(tableName, row);
}
+ @Override
public HRegionLocation locateRegion(byte[] tableName, byte[] row) throws IOException {
return delegate.locateRegion(tableName, row);
}
+ @Override
public void clearRegionCache() {
delegate.clearRegionCache();
}
+ @Override
public void clearRegionCache(TableName tableName) {
delegate.clearRegionCache(tableName);
}
+ @Override
public void clearRegionCache(byte[] tableName) {
delegate.clearRegionCache(tableName);
}
+ @Override
public HRegionLocation relocateRegion(TableName tableName, byte[] row) throws IOException {
return delegate.relocateRegion(tableName, row);
}
+ @Override
public HRegionLocation relocateRegion(byte[] tableName, byte[] row) throws IOException {
return delegate.relocateRegion(tableName, row);
}
- public void updateCachedLocations(TableName tableName, byte[] rowkey, Object exception,
- ServerName source) {
- delegate.updateCachedLocations(tableName, rowkey, exception, source);
+ @Override
+ public void updateCachedLocations(TableName tableName, byte[] regionName, byte[] rowkey,
+ Object exception, ServerName source) {
+ delegate.updateCachedLocations(tableName, regionName, rowkey, exception, source);
}
+ @Override
public void updateCachedLocations(TableName tableName, byte[] rowkey, Object exception,
HRegionLocation source) {
delegate.updateCachedLocations(tableName, rowkey, exception, source);
}
+ @Override
public void updateCachedLocations(byte[] tableName, byte[] rowkey, Object exception,
HRegionLocation source) {
delegate.updateCachedLocations(tableName, rowkey, exception, source);
}
+ @Override
public HRegionLocation locateRegion(byte[] regionName) throws IOException {
return delegate.locateRegion(regionName);
}
+ @Override
public List<HRegionLocation> locateRegions(TableName tableName) throws IOException {
return delegate.locateRegions(tableName);
}
+ @Override
public List<HRegionLocation> locateRegions(byte[] tableName) throws IOException {
return delegate.locateRegions(tableName);
}
+ @Override
public List<HRegionLocation>
locateRegions(TableName tableName, boolean useCache, boolean offlined) throws IOException {
return delegate.locateRegions(tableName, useCache, offlined);
}
+ @Override
public List<HRegionLocation> locateRegions(byte[] tableName, boolean useCache, boolean offlined)
throws IOException {
return delegate.locateRegions(tableName, useCache, offlined);
}
+ @Override
public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.BlockingInterface getMaster()
throws IOException {
return delegate.getMaster();
}
+ @Override
public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
getAdmin(ServerName serverName) throws IOException {
return delegate.getAdmin(serverName);
}
+ @Override
public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
getAdmin(ServerName serverName, boolean getMaster) throws IOException {
return delegate.getAdmin(serverName, getMaster);
}
+ @Override
public HRegionLocation getRegionLocation(TableName tableName, byte[] row, boolean reload)
throws IOException {
return delegate.getRegionLocation(tableName, row, reload);
}
+ @Override
public HRegionLocation getRegionLocation(byte[] tableName, byte[] row, boolean reload)
throws IOException {
return delegate.getRegionLocation(tableName, row, reload);
}
+ @Override
public void processBatch(List<? extends Row> actions, TableName tableName, ExecutorService pool,
Object[] results) throws IOException, InterruptedException {
delegate.processBatch(actions, tableName, pool, results);
}
+ @Override
public void processBatch(List<? extends Row> actions, byte[] tableName, ExecutorService pool,
Object[] results) throws IOException, InterruptedException {
delegate.processBatch(actions, tableName, pool, results);
}
+ @Override
public <R> void processBatchCallback(List<? extends Row> list, TableName tableName,
ExecutorService pool, Object[] results, Callback<R> callback) throws IOException,
InterruptedException {
delegate.processBatchCallback(list, tableName, pool, results, callback);
}
+ @Override
public <R> void processBatchCallback(List<? extends Row> list, byte[] tableName,
ExecutorService pool, Object[] results, Callback<R> callback) throws IOException,
InterruptedException {
delegate.processBatchCallback(list, tableName, pool, results, callback);
}
+ @Override
public void setRegionCachePrefetch(TableName tableName, boolean enable) {
delegate.setRegionCachePrefetch(tableName, enable);
}
+ @Override
public void setRegionCachePrefetch(byte[] tableName, boolean enable) {
delegate.setRegionCachePrefetch(tableName, enable);
}
+ @Override
public boolean getRegionCachePrefetch(TableName tableName) {
return delegate.getRegionCachePrefetch(tableName);
}
+ @Override
public boolean getRegionCachePrefetch(byte[] tableName) {
return delegate.getRegionCachePrefetch(tableName);
}
+ @Override
public int getCurrentNrHRS() throws IOException {
return delegate.getCurrentNrHRS();
}
+ @Override
public HTableDescriptor[] getHTableDescriptorsByTableName(List<TableName> tableNames)
throws IOException {
return delegate.getHTableDescriptorsByTableName(tableNames);
}
+ @Override
public HTableDescriptor[] getHTableDescriptors(List<String> tableNames) throws IOException {
return delegate.getHTableDescriptors(tableNames);
}
+ @Override
public boolean isClosed() {
return delegate.isClosed();
}
+ @Override
public void clearCaches(ServerName sn) {
delegate.clearCaches(sn);
}
+ @Override
public void close() throws IOException {
delegate.close();
}
+ @Override
public void deleteCachedRegionLocation(HRegionLocation location) {
delegate.deleteCachedRegionLocation(location);
}
+ @Override
public MasterKeepAliveConnection getKeepAliveMasterService()
throws MasterNotRunningException {
return delegate.getKeepAliveMasterService();
}
+ @Override
public boolean isDeadServer(ServerName serverName) {
return delegate.isDeadServer(serverName);
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
index 6ed0445..b6f5c4b 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
@@ -25,24 +25,28 @@ import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.List;
+import java.util.Random;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -64,7 +68,7 @@ public class TestMetaReaderEditor {
LOG.info(why, e);
abort.set(true);
}
-
+
@Override
public boolean isAborted() {
return abort.get();
@@ -294,5 +298,100 @@ public class TestMetaReaderEditor {
pair.getFirst().getEncodedName());
}
+ @Test
+ public void testParseReplicaIdFromServerColumn() {
+ String column1 = HConstants.SERVER_QUALIFIER_STR;
+ assertEquals(0, MetaReader.parseReplicaIdFromServerColumn(Bytes.toBytes(column1)));
+ String column2 = column1 + MetaReader.META_REPLICA_ID_DELIMITER;
+ assertEquals(-1, MetaReader.parseReplicaIdFromServerColumn(Bytes.toBytes(column2)));
+ String column3 = column2 + "00";
+ assertEquals(-1, MetaReader.parseReplicaIdFromServerColumn(Bytes.toBytes(column3)));
+ String column4 = column3 + "2A";
+ assertEquals(42, MetaReader.parseReplicaIdFromServerColumn(Bytes.toBytes(column4)));
+ String column5 = column4 + "2A";
+ assertEquals(-1, MetaReader.parseReplicaIdFromServerColumn(Bytes.toBytes(column5)));
+ String column6 = HConstants.STARTCODE_QUALIFIER_STR;
+ assertEquals(-1, MetaReader.parseReplicaIdFromServerColumn(Bytes.toBytes(column6)));
+ }
+
+ @Test
+ public void testMetaReaderGetColumnMethods() {
+ Assert.assertArrayEquals(HConstants.SERVER_QUALIFIER, MetaReader.getServerColumn(0));
+ Assert.assertArrayEquals(Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR
+ + MetaReader.META_REPLICA_ID_DELIMITER + "002A"), MetaReader.getServerColumn(42));
+
+ Assert.assertArrayEquals(HConstants.STARTCODE_QUALIFIER, MetaReader.getStartCodeColumn(0));
+ Assert.assertArrayEquals(Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR
+ + MetaReader.META_REPLICA_ID_DELIMITER + "002A"), MetaReader.getStartCodeColumn(42));
+
+ Assert.assertArrayEquals(HConstants.SEQNUM_QUALIFIER, MetaReader.getSeqNumColumn(0));
+ Assert.assertArrayEquals(Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR
+ + MetaReader.META_REPLICA_ID_DELIMITER + "002A"), MetaReader.getSeqNumColumn(42));
+ }
+
+ @Test
+ public void testMetaLocationsForRegionReplicas() throws IOException {
+ Random random = new Random();
+ ServerName serverName0 = ServerName.valueOf("foo", 60010, random.nextLong());
+ ServerName serverName1 = ServerName.valueOf("bar", 60010, random.nextLong());
+ ServerName serverName100 = ServerName.valueOf("baz", 60010, random.nextLong());
+
+ long regionId = System.currentTimeMillis();
+ HRegionInfo primary = new HRegionInfo(TableName.valueOf("table_foo"),
+ HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, regionId, 0);
+ HRegionInfo replica1 = new HRegionInfo(TableName.valueOf("table_foo"),
+ HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, regionId, 1);
+ HRegionInfo replica100 = new HRegionInfo(TableName.valueOf("table_foo"),
+ HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, regionId, 100);
+
+ long seqNum0 = random.nextLong();
+ long seqNum1 = random.nextLong();
+ long seqNum100 = random.nextLong();
+
+
+ HTable meta = MetaReader.getMetaHTable(CT);
+ try {
+ MetaEditor.updateRegionLocation(CT, primary, serverName0, seqNum0);
+
+ // assert that the server, startcode and seqNum columns are there for the primary region
+ assertMetaLocation(meta, primary.getRegionName(), serverName0, seqNum0, 0, true);
+
+ // add replica = 1
+ MetaEditor.updateRegionLocation(CT, replica1, serverName1, seqNum1);
+ // check whether the primary is still there
+ assertMetaLocation(meta, primary.getRegionName(), serverName0, seqNum0, 0, true);
+ // now check for replica 1
+ assertMetaLocation(meta, primary.getRegionName(), serverName1, seqNum1, 1, true);
+
+      // add replica = 100
+ MetaEditor.updateRegionLocation(CT, replica100, serverName100, seqNum100);
+ // check whether the primary is still there
+ assertMetaLocation(meta, primary.getRegionName(), serverName0, seqNum0, 0, true);
+ // check whether the replica 1 is still there
+ assertMetaLocation(meta, primary.getRegionName(), serverName1, seqNum1, 1, true);
+      // now check for replica 100
+ assertMetaLocation(meta, primary.getRegionName(), serverName100, seqNum100, 100, true);
+ } finally {
+ meta.close();
+ }
+ }
+
+ public static void assertMetaLocation(HTable meta, byte[] row, ServerName serverName,
+ long seqNum, int replicaId, boolean checkSeqNum) throws IOException {
+ Get get = new Get(row);
+ Result result = meta.get(get);
+ assertTrue(Bytes.equals(
+ result.getValue(HConstants.CATALOG_FAMILY, MetaReader.getServerColumn(replicaId)),
+ Bytes.toBytes(serverName.getHostAndPort())));
+ assertTrue(Bytes.equals(
+ result.getValue(HConstants.CATALOG_FAMILY, MetaReader.getStartCodeColumn(replicaId)),
+ Bytes.toBytes(serverName.getStartcode())));
+ if (checkSeqNum) {
+ assertTrue(Bytes.equals(
+ result.getValue(HConstants.CATALOG_FAMILY, MetaReader.getSeqNumColumn(replicaId)),
+ Bytes.toBytes(seqNum)));
+ }
+ }
+
}
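
Taken together, the assertions above pin down the replica-aware layout of the hbase:meta info family: replica 0 keeps the bare server/serverstartcode/seqnumDuringOpen qualifiers, while replica N appends a delimiter plus N rendered as exactly four upper-case hex digits ("002A" is 42 in hex). A minimal standalone sketch of a parser consistent with all six test cases, assuming the delimiter is '_' (the value of MetaReader.META_REPLICA_ID_DELIMITER is not shown in this patch):

    // Sketch only: mirrors the behaviour the test asserts, not MetaReader's actual code.
    static int parseReplicaIdFromServerColumn(String qualifier) {
      String base = "server";                 // HConstants.SERVER_QUALIFIER_STR
      if (qualifier.equals(base)) {
        return 0;                             // bare column: primary replica
      }
      if (!qualifier.startsWith(base + "_")) {
        return -1;                            // e.g. "serverstartcode"
      }
      String hex = qualifier.substring(base.length() + 1);
      if (hex.length() != 4) {
        return -1;                            // covers the "server_00" and "server_002A2A" cases
      }
      try {
        return Integer.parseInt(hex, 16);     // "002A" -> 42
      } catch (NumberFormatException e) {
        return -1;
      }
    }
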
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 7cd2557..5ada0ad 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -26,6 +26,8 @@ import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
import java.io.IOException;
import java.lang.reflect.Method;
@@ -60,6 +62,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
@@ -85,14 +88,15 @@ import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
-import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
+import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.log4j.Level;
import org.junit.After;
@@ -5970,4 +5974,44 @@ public class TestFromClientSide {
assertEquals(insertNum, count);
table.close();
}
+
+ @Test
+ public void testGetStartEndKeysWithRegionReplicas() throws IOException {
+ HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testGetStartEndKeys"));
+ HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
+ htd.addFamily(fam);
+ byte[][] KEYS = HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE;
+ TEST_UTIL.getHBaseAdmin().createTable(htd, KEYS);
+    List<HRegionInfo> regions = TEST_UTIL.getHBaseAdmin().getTableRegions(htd.getTableName());
+
+ for (int regionReplication = 1; regionReplication < 4 ; regionReplication++) {
+      List<RegionLocations> regionLocations = new ArrayList<RegionLocations>();
+
+ // mock region locations coming from meta with multiple replicas
+ for (HRegionInfo region : regions) {
+ HRegionLocation[] arr = new HRegionLocation[regionReplication];
+ for (int i = 0; i < arr.length; i++) {
+ arr[i] = new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(region, i), null);
+ }
+ regionLocations.add(new RegionLocations(arr));
+ }
+
+ HTable table = spy(new HTable(TEST_UTIL.getConfiguration(), htd.getTableName()));
+ when(table.listRegionLocations()).thenReturn(regionLocations);
+
+      Pair<byte[][], byte[][]> startEndKeys = table.getStartEndKeys();
+
+ assertEquals(KEYS.length + 1, startEndKeys.getFirst().length);
+
+ for (int i = 0; i < KEYS.length + 1; i++) {
+ byte[] startKey = i == 0 ? HConstants.EMPTY_START_ROW : KEYS[i - 1];
+ byte[] endKey = i == KEYS.length ? HConstants.EMPTY_END_ROW : KEYS[i];
+ assertArrayEquals(startKey, startEndKeys.getFirst()[i]);
+ assertArrayEquals(endKey, startEndKeys.getSecond()[i]);
+ }
+
+ table.close();
+ }
+ }
+
}
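
testGetStartEndKeysWithRegionReplicas pins down one invariant: a table created with N split keys has N + 1 regions, and getStartEndKeys() must report exactly one boundary pair per region, no matter how many replicas each region carries. Under the mocked layout above (one RegionLocations bundle per region, replicas inside it), the derivation looks roughly like the loop below; this is an illustration of the expected behaviour, not HTable's actual implementation:

    List<byte[]> startKeys = new ArrayList<byte[]>();
    List<byte[]> endKeys = new ArrayList<byte[]>();
    for (RegionLocations locations : regionLocations) {
      // The no-argument getRegionLocation() returns the primary (replica 0),
      // so each region contributes exactly one entry regardless of replication.
      HRegionInfo primary = locations.getRegionLocation().getRegionInfo();
      startKeys.add(primary.getStartKey());
      endKeys.add(primary.getEndKey());
    }
    Pair<byte[][], byte[][]> startEndKeys = new Pair<byte[][], byte[][]>(
        startKeys.toArray(new byte[startKeys.size()][]),
        endKeys.toArray(new byte[endKeys.size()][]));
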
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
index 503d4d0..56244fc 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
@@ -46,9 +46,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
@@ -359,15 +359,16 @@ public class TestHCM {
assertNotNull(conn.getCachedLocation(TABLE_NAME, ROW));
- final int nextPort = conn.getCachedLocation(TABLE_NAME, ROW).getPort() + 1;
- HRegionLocation loc = conn.getCachedLocation(TABLE_NAME, ROW);
+ final int nextPort = conn.getCachedLocation(TABLE_NAME, ROW).getRegionLocation().getPort() + 1;
+ HRegionLocation loc = conn.getCachedLocation(TABLE_NAME, ROW).getRegionLocation();
conn.updateCachedLocation(loc.getRegionInfo(), loc.getServerName(),
ServerName.valueOf("127.0.0.1", nextPort,
HConstants.LATEST_TIMESTAMP), HConstants.LATEST_TIMESTAMP);
- Assert.assertEquals(conn.getCachedLocation(TABLE_NAME, ROW).getPort(), nextPort);
+ Assert.assertEquals(conn.getCachedLocation(TABLE_NAME, ROW)
+ .getRegionLocation().getPort(), nextPort);
- conn.forceDeleteCachedLocation(TABLE_NAME, ROW.clone());
- HRegionLocation rl = conn.getCachedLocation(TABLE_NAME, ROW);
+ conn.clearRegionCache(TABLE_NAME, ROW.clone());
+ RegionLocations rl = conn.getCachedLocation(TABLE_NAME, ROW);
assertNull("What is this location?? " + rl, rl);
// We're now going to move the region and check that it works for the client
@@ -389,7 +390,7 @@ public class TestHCM {
}
// Now moving the region to the second server
- HRegionLocation toMove = conn.getCachedLocation(TABLE_NAME, ROW);
+ HRegionLocation toMove = conn.getCachedLocation(TABLE_NAME, ROW).getRegionLocation();
byte[] regionName = toMove.getRegionInfo().getRegionName();
byte[] encodedRegionNameBytes = toMove.getRegionInfo().getEncodedNameAsBytes();
@@ -438,7 +439,8 @@ public class TestHCM {
// Cache was NOT updated and points to the wrong server
Assert.assertFalse(
- conn.getCachedLocation(TABLE_NAME, ROW).getPort() == destServerName.getPort());
+ conn.getCachedLocation(TABLE_NAME, ROW).getRegionLocation()
+ .getPort() == destServerName.getPort());
// This part relies on a number of tries equals to 1.
// We do a put and expect the cache to be updated, even if we don't retry
@@ -462,10 +464,13 @@ public class TestHCM {
Assert.assertNotNull("Cached connection is null", conn.getCachedLocation(TABLE_NAME, ROW));
Assert.assertEquals(
"Previous server was " + curServer.getServerName().getHostAndPort(),
- destServerName.getPort(), conn.getCachedLocation(TABLE_NAME, ROW).getPort());
+ destServerName.getPort(),
+ conn.getCachedLocation(TABLE_NAME, ROW).getRegionLocation().getPort());
- Assert.assertFalse(destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes));
- Assert.assertFalse(curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes));
+ Assert.assertFalse(destServer.getRegionsInTransitionInRS()
+ .containsKey(encodedRegionNameBytes));
+ Assert.assertFalse(curServer.getRegionsInTransitionInRS()
+ .containsKey(encodedRegionNameBytes));
// We move it back to do another test with a scan
LOG.info("Move starting region=" + toMove.getRegionInfo().getRegionNameAsString());
@@ -488,7 +493,7 @@ public class TestHCM {
LOG.info("Move finished for region=" + toMove.getRegionInfo().getRegionNameAsString());
// Cache was NOT updated and points to the wrong server
- Assert.assertFalse(conn.getCachedLocation(TABLE_NAME, ROW).getPort() ==
+ Assert.assertFalse(conn.getCachedLocation(TABLE_NAME, ROW).getRegionLocation().getPort() ==
curServer.getServerName().getPort());
Scan sc = new Scan();
@@ -512,7 +517,8 @@ public class TestHCM {
Assert.assertNotNull(conn.getCachedLocation(TABLE_NAME, ROW));
Assert.assertEquals(
"Previous server was "+destServer.getServerName().getHostAndPort(),
- curServer.getServerName().getPort(), conn.getCachedLocation(TABLE_NAME, ROW).getPort());
+ curServer.getServerName().getPort(),
+ conn.getCachedLocation(TABLE_NAME, ROW).getRegionLocation().getPort());
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
@@ -551,7 +557,7 @@ public class TestHCM {
HConnectionManager.HConnectionImplementation conn =
(HConnectionManager.HConnectionImplementation)table.getConnection();
- HRegionLocation location = conn.getCachedLocation(TABLE_NAME2, ROW);
+ HRegionLocation location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
assertNotNull(location);
ServerName anySource = ServerName.valueOf(location.getHostname(), location.getPort() - 1, 0L);
@@ -560,28 +566,28 @@ public class TestHCM {
int nextPort = location.getPort() + 1;
conn.updateCachedLocation(location.getRegionInfo(), location.getServerName(),
ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() - 1);
- location = conn.getCachedLocation(TABLE_NAME2, ROW);
+ location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
Assert.assertEquals(nextPort, location.getPort());
// No source specified - same.
nextPort = location.getPort() + 1;
conn.updateCachedLocation(location.getRegionInfo(), location.getServerName(),
ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() - 1);
- location = conn.getCachedLocation(TABLE_NAME2, ROW);
+ location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
Assert.assertEquals(nextPort, location.getPort());
// Higher seqNum - overwrites lower seqNum.
nextPort = location.getPort() + 1;
conn.updateCachedLocation(location.getRegionInfo(), anySource,
ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() + 1);
- location = conn.getCachedLocation(TABLE_NAME2, ROW);
+ location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
Assert.assertEquals(nextPort, location.getPort());
// Lower seqNum - does not overwrite higher seqNum.
nextPort = location.getPort() + 1;
conn.updateCachedLocation(location.getRegionInfo(), anySource,
ServerName.valueOf("127.0.0.1", nextPort, 0), location.getSeqNum() - 1);
- location = conn.getCachedLocation(TABLE_NAME2, ROW);
+ location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation();
Assert.assertEquals(nextPort - 1, location.getPort());
}
@@ -778,7 +784,7 @@ public class TestHCM {
table.put(put);
// Now moving the region to the second server
- HRegionLocation toMove = conn.getCachedLocation(TABLE_NAME3, ROW_X);
+ HRegionLocation toMove = conn.getCachedLocation(TABLE_NAME3, ROW_X).getRegionLocation();
byte[] regionName = toMove.getRegionInfo().getRegionName();
byte[] encodedRegionNameBytes = toMove.getRegionInfo().getEncodedNameAsBytes();
@@ -844,7 +850,8 @@ public class TestHCM {
// Cache was NOT updated and points to the wrong server
Assert.assertFalse(
- conn.getCachedLocation(TABLE_NAME3, ROW_X).getPort() == destServerName.getPort());
+ conn.getCachedLocation(TABLE_NAME3, ROW_X).getRegionLocation()
+ .getPort() == destServerName.getPort());
// Hijack the number of retry to fail after 2 tries
final int prevNumRetriesVal = setNumTries(conn, 2);
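
Every TestHCM hunk above is the same mechanical migration: the location cache used to hand back a single HRegionLocation, but with replicas it now hands back a RegionLocations bundle, and callers unwrap the primary explicitly. The before/after calling pattern, in sketch form:

    // Before: HRegionLocation loc = conn.getCachedLocation(TABLE_NAME, ROW);
    // After:
    RegionLocations rl = conn.getCachedLocation(TABLE_NAME, ROW);
    if (rl != null) {
      HRegionLocation primary = rl.getRegionLocation();  // replica 0's location
      int port = primary.getPort();                      // per-server data lives one level down now
    }
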
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
index 1ee256b..db4e5cf 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
@@ -18,6 +18,7 @@
*/
package org.apache.hadoop.hbase.regionserver;
+import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -33,11 +34,15 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+import com.google.protobuf.ByteString;
+
@Category(SmallTests.class)
public class TestHRegionInfo {
@Test
@@ -101,7 +106,7 @@ public class TestHRegionInfo {
+ id + "." + md5HashInHex + ".",
nameStr);
}
-
+
@Test
public void testContainsRange() {
HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf("testtable"));
@@ -121,7 +126,7 @@ public class TestHRegionInfo {
assertFalse(hri.containsRange(Bytes.toBytes("g"), Bytes.toBytes("g")));
// Single row range entirely outside
assertFalse(hri.containsRange(Bytes.toBytes("z"), Bytes.toBytes("z")));
-
+
// Degenerate range
try {
hri.containsRange(Bytes.toBytes("z"), Bytes.toBytes("a"));
@@ -149,13 +154,106 @@ public class TestHRegionInfo {
public void testComparator() {
TableName tablename = TableName.valueOf("comparatorTablename");
byte[] empty = new byte[0];
- HRegionInfo older = new HRegionInfo(tablename, empty, empty, false, 0L);
- HRegionInfo newer = new HRegionInfo(tablename, empty, empty, false, 1L);
+ HRegionInfo older = new HRegionInfo(tablename, empty, empty, false, 0L);
+ HRegionInfo newer = new HRegionInfo(tablename, empty, empty, false, 1L);
assertTrue(older.compareTo(newer) < 0);
assertTrue(newer.compareTo(older) > 0);
assertTrue(older.compareTo(older) == 0);
assertTrue(newer.compareTo(newer) == 0);
}
-
+
+ @Test
+ public void testRegionNameForRegionReplicas() throws Exception {
+ String tableName = "tablename";
+ final TableName tn = TableName.valueOf(tableName);
+ String startKey = "startkey";
+ final byte[] sk = Bytes.toBytes(startKey);
+ String id = "id";
+
+ // assert with only the region name without encoding
+
+ // primary, replicaId = 0
+ byte [] name = HRegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 0, false);
+ String nameStr = Bytes.toString(name);
+ assertEquals(tableName + "," + startKey + "," + id, nameStr);
+
+ // replicaId = 1
+ name = HRegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 1, false);
+ nameStr = Bytes.toString(name);
+ assertEquals(tableName + "," + startKey + "," + id + "_" +
+ String.format(HRegionInfo.REPLICA_ID_FORMAT, 1), nameStr);
+
+ // replicaId = max
+ name = HRegionInfo.createRegionName(tn, sk, Bytes.toBytes(id), 0xFFFF, false);
+ nameStr = Bytes.toString(name);
+ assertEquals(tableName + "," + startKey + "," + id + "_" +
+ String.format(HRegionInfo.REPLICA_ID_FORMAT, 0xFFFF), nameStr);
+ }
+
+ @Test
+ public void testParseName() throws IOException {
+ TableName tableName = TableName.valueOf("testParseName");
+ byte[] startKey = Bytes.toBytes("startKey");
+ long regionId = System.currentTimeMillis();
+ int replicaId = 42;
+
+ // test without replicaId
+ byte[] regionName = HRegionInfo.createRegionName(tableName, startKey, regionId, false);
+
+ byte[][] fields = HRegionInfo.parseRegionName(regionName);
+    assertArrayEquals(Bytes.toString(fields[0]), tableName.getName(), fields[0]);
+    assertArrayEquals(Bytes.toString(fields[1]), startKey, fields[1]);
+    assertArrayEquals(Bytes.toString(fields[2]), Bytes.toBytes(Long.toString(regionId)), fields[2]);
+ assertEquals(3, fields.length);
+
+ // test with replicaId
+ regionName = HRegionInfo.createRegionName(tableName, startKey, regionId,
+ replicaId, false);
+
+ fields = HRegionInfo.parseRegionName(regionName);
+    assertArrayEquals(Bytes.toString(fields[0]), tableName.getName(), fields[0]);
+    assertArrayEquals(Bytes.toString(fields[1]), startKey, fields[1]);
+    assertArrayEquals(Bytes.toString(fields[2]), Bytes.toBytes(Long.toString(regionId)), fields[2]);
+ assertArrayEquals(Bytes.toString(fields[3]), Bytes.toBytes(
+ String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId)), fields[3]);
+ }
+
+ @Test
+ public void testConvert() {
+ TableName tableName = TableName.valueOf("ns1:table1");
+ byte[] startKey = Bytes.toBytes("startKey");
+ byte[] endKey = Bytes.toBytes("endKey");
+ boolean split = false;
+ long regionId = System.currentTimeMillis();
+ int replicaId = 42;
+
+
+ HRegionInfo hri = new HRegionInfo(tableName, startKey, endKey, split,
+ regionId, replicaId);
+
+ // convert two times, compare
+ HRegionInfo convertedHri = HRegionInfo.convert(HRegionInfo.convert(hri));
+
+ assertEquals(hri, convertedHri);
+
+ // test convert RegionInfo without replicaId
+ RegionInfo info = RegionInfo.newBuilder()
+ .setTableName(HBaseProtos.TableName.newBuilder()
+ .setQualifier(ByteString.copyFrom(tableName.getQualifier()))
+ .setNamespace(ByteString.copyFrom(tableName.getNamespace()))
+ .build())
+ .setStartKey(ByteString.copyFrom(startKey))
+ .setEndKey(ByteString.copyFrom(endKey))
+ .setSplit(split)
+ .setRegionId(regionId)
+ .build();
+
+ convertedHri = HRegionInfo.convert(info);
+ HRegionInfo expectedHri = new HRegionInfo(tableName, startKey, endKey, split,
+ regionId, 0); // expecting default replicaId
+
+ assertEquals(expectedHri, convertedHri);
+ }
+
}
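
The TestHRegionInfo additions pin down the replica-aware naming and serialization rules. A primary keeps the classic <table>,<startkey>,<id> name, while replica N gets an _<N> suffix rendered through HRegionInfo.REPLICA_ID_FORMAT; testConvert additionally checks the wire-compat rule that a protobuf RegionInfo carrying no replica id deserializes as replica 0, so pre-replica peers keep interoperating. The patch never shows REPLICA_ID_FORMAT's value, but the four-digit upper-case hex seen throughout ("002A" for 42, "FFFF" for the 0xFFFF maximum) suggests "%04X"; a sketch under that assumption:

    // Hypothetical helper; "%04X" is an inferred value for HRegionInfo.REPLICA_ID_FORMAT.
    static String regionNameFor(String table, String startKey, String id, int replicaId) {
      String name = table + "," + startKey + "," + id;
      return replicaId == 0 ? name : name + "_" + String.format("%04X", replicaId);
    }
    // regionNameFor("tablename", "startkey", "id", 1)      -> "tablename,startkey,id_0001"
    // regionNameFor("tablename", "startkey", "id", 0xFFFF) -> "tablename,startkey,id_FFFF"
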