diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 78e175d..b6ee36f 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -29,7 +29,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import com.google.protobuf.HBaseZeroCopyByteString;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -47,15 +46,34 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.io.DataInputBuffer;
+import com.google.protobuf.HBaseZeroCopyByteString;
import com.google.protobuf.InvalidProtocolBufferException;
/**
- * HRegion information.
- * Contains HRegion id, start and end keys, a reference to this HRegions' table descriptor, etc.
+ * Information about a region. A region is a range of keys in the whole keyspace of a table, an
+ * identifier (a timestamp) for differentiating between subset ranges (after region split)
+ * and a replicaId for differentiating the instance for the same range and some status information
+ * about the region.
+ *
+ * The region has a unique name which consists of the following fields:
+ * <ul>
* Reads region and assignment information from <code>hbase:meta</code>.
*/
@InterfaceAudience.Private
public class MetaReader {
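The rewritten HRegionInfo javadoc above enumerates the fields that make up a region name (tableName, startKey, regionId, replicaId, encodedName). A minimal illustration of the scheme, assuming (per this patch series, not this hunk) that a non-default replicaId is appended to the regionId as a zero-padded hex suffix; the keys and hash are placeholders:

```java
// Illustration only, not patch code; the "%04X" suffix format is an assumption.
long regionId = 1396201383001L;  // creation timestamp
String defaultReplica =          // replicaId 0 keeps the legacy name
    "t1,startrow," + regionId + ".5210a0ade343a6ab0adb5a1b9e9137b5.";
String replicaOne =              // replicaId 1 gains a "_0001" suffix
    "t1,startrow," + regionId + "_" + String.format("%04X", 1)
    + ".5210a0ade343a6ab0adb5a1b9e9137b5.";
```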
+
+ /*
+ * HBASE-10070 adds a replicaId to HRI, meaning more than one HRI can be defined for the
+ * same table range (table, startKey, endKey). For every range, there will be at least one
+ * HRI defined which is called default replica.
+ *
+ * Meta layout (as of 0.98 + HBASE-10070) is like:
+ * For each table range, there is a single row, formatted like:
+ * <tableName>,<startKey>,<regionId>,<encodedRegionName>. This row corresponds to the regionName
+ * of the default region replica.
+ */

- /**
- * Performs a full scan of <code>hbase:meta</code>, skipping regions from any
- * tables in the specified set of disabled tables.
- * @param catalogTracker
- * @param disabledTables set of disabled tables that will not be returned
- * @return Returns a map of every region to it's currently assigned server,
- * according to META. If the region does not have an assignment it will have
- * a null value in the map.
- * @throws IOException
- */
- public static Map<HRegionInfo, ServerName> fullScan(
-     CatalogTracker catalogTracker, final Set<TableName> disabledTables)
- throws IOException {

- /**
- * Performs a full scan of <code>hbase:meta</code>, skipping regions from any
- * tables in the specified set of disabled tables.
- * @param catalogTracker
- * @param disabledTables set of disabled tables that will not be returned
- * @param excludeOfflinedSplitParents If true, do not include offlined split
- * parents in the return.
- * @return Returns a map of every region to it's currently assigned server,
- * according to META. If the region does not have an assignment it will have
- * a null value in the map.
- * @throws IOException
- */
- public static Map<HRegionInfo, ServerName> fullScan(
-     CatalogTracker catalogTracker, final Set<TableName> disabledTables,
-     final boolean excludeOfflinedSplitParents)
- throws IOException {

* Performs a full scan of <code>hbase:meta</code>.
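The layout comment above is truncated in this excerpt; it goes on to enumerate per-replica columns: alongside the legacy info:server, info:serverstartcode and info:seqnumDuringOpen qualifiers for the default replica, each additional replica gets a suffixed variant (e.g. info:server_<replicaId>). MetaEditor below writes those through MetaReader.getServerColumn/getStartCodeColumn/getSeqNumColumn; a plausible sketch of one such helper, with the exact suffix format an assumption:

```java
// Sketch, not the patch's exact code: replica-aware server column qualifier.
static byte[] getServerColumn(int replicaId) {
  // replica 0 keeps the pre-HBASE-10070 "info:server" qualifier
  return replicaId == 0
      ? HConstants.SERVER_QUALIFIER
      : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + "_"
          + String.format("%04X", replicaId));
}
```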
@@ -206,33 +205,81 @@ public class MetaReader {
}
/**
- * Reads the location of the specified region
+ * Gets the region info and assignment for the specified region.
* @param catalogTracker
- * @param regionName region whose location we are after
- * @return location of region as a {@link ServerName} or null if not found
+ * @param regionName Region to lookup.
+ * @return Location and HRegionInfo for regionName
* @throws IOException
+ * @deprecated use {@link #getRegionLocation(CatalogTracker, byte[])} instead
*/
- static ServerName readRegionLocation(CatalogTracker catalogTracker,
- byte [] regionName)
+ @Deprecated
+ public static Pair<HRegionInfo, ServerName> getRegion(CatalogTracker catalogTracker,
+     byte [] regionName) throws IOException {
+   HRegionLocation location = getRegionLocation(catalogTracker, regionName);
+   return location == null
+     ? null
+     : new Pair<HRegionInfo, ServerName>(location.getRegionInfo(), location.getServerName());
+ }
+
+ /**
+ * Returns the HRegionLocation from meta for the given region <code>regionName</code>
+ * @param regionName
+ * @return HRegionLocation for the given region
* @throws IOException
*/
- public static Pair<HRegionInfo, ServerName> getRegion(CatalogTracker catalogTracker,
-     byte [] regionName) throws IOException {

+ /**
+ * Returns the HRegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and
+ * <code>qualifier</code> of the catalog table result.
+ * @param r a Result object from the catalog table scan
+ * @param qualifier Column family qualifier
+ * @return An HRegionInfo instance or null.
+ */
+ private static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier) {
+ Cell cell = r.getColumnLatestCell(getFamily(), qualifier);
+ if (cell == null) return null;
+ return HRegionInfo.parseFromOrNull(cell.getValueArray(),
+ cell.getValueOffset(), cell.getValueLength());
+ }
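A hypothetical caller of the new private helper (together with getDaughterRegions, added just below), to show the intended read path; metaTable, regionName and LOG stand in for an hbase:meta table handle, a region name and a logger:

```java
// Hypothetical usage sketch; not part of the patch.
Result r = metaTable.get(new Get(regionName));
HRegionInfo hri = getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
if (hri != null && hri.isSplitParent()) {
  PairOfSameType<HRegionInfo> daughters = getDaughterRegions(r);
  LOG.info("Daughters: " + daughters.getFirst() + ", " + daughters.getSecond());
}
```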
+
+ /**
+ * Returns the daughter regions by reading the corresponding columns of the catalog table
+ * Result.
+ * @param data a Result object from the catalog table scan
+ * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split
+ * parent
+ */
+ public static PairOfSameType<HRegionInfo> getDaughterRegions(Result data) {
+   HRegionInfo splitA = getHRegionInfo(data, HConstants.SPLITA_QUALIFIER);
+   HRegionInfo splitB = getHRegionInfo(data, HConstants.SPLITB_QUALIFIER);
+   return new PairOfSameType<HRegionInfo>(splitA, splitB);
+ }

diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
* This is NOT a connection to a particular server but to ALL servers in the cluster. Individual
* connections are managed at a lower level.
*
@@ -250,7 +250,9 @@ public interface HConnection extends Abortable, Closeable {
* @return HRegionLocation that describes where to find the region in
* question
* @throws IOException if a remote or network exception occurs
+ * @deprecated This is no longer a public API
*/
+ @Deprecated
public HRegionLocation locateRegion(final TableName tableName,
final byte [] row) throws IOException;
@@ -304,11 +306,12 @@ public interface HConnection extends Abortable, Closeable {
* Update the location cache. This is used internally by HBase, in most cases it should not be
* used by the client application.
* @param tableName the table name
+ * @param regionName the regionName
* @param rowkey the row
* @param exception the exception if any. Can be null.
* @param source the previous location
*/
- void updateCachedLocations(TableName tableName, byte[] rowkey,
+ void updateCachedLocations(TableName tableName, byte[] regionName, byte[] rowkey,
Object exception, ServerName source);
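The extra regionName lets the client evict the cache entry for the exact region (and replica) that failed, rather than rely on a row-key lookup alone. A hypothetical internal call site; connection, regionName, row and source are assumed locals:

```java
// Hypothetical call sketch for the widened signature.
connection.updateCachedLocations(TableName.valueOf("t1"), regionName, row,
    new NotServingRegionException("region moved"), source);
```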
@Deprecated
@@ -344,7 +347,9 @@ public interface HConnection extends Abortable, Closeable {
* regions from returned list.
* @return list of region locations for all regions of table
* @throws IOException
+ * @deprecated This is no longer a public API
*/
+ @Deprecated
public List<HRegionLocation> locateRegions(final TableName tableName,
    final boolean useCache, final boolean offlined) throws IOException;

diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
* Gets all the regions and their address for this table.
* <p>
* This is mainly useful for the MapReduce integration.
* @return A map of HRegionInfo with it's server address
* @throws IOException if a remote or network exception occurs
+ * @deprecated This is no longer a public API
*/
+ @Deprecated
public NavigableMap<HRegionInfo, ServerName> getRegionLocations() throws IOException {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
index 9d8724d..a76be1a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
@@ -31,10 +31,6 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
@@ -96,6 +92,7 @@ public class CoprocessorHConnection implements HConnection {
this.delegate = delegate;
}
+ @Override
public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface
getClient(ServerName serverName) throws IOException {
// client is trying to reach off-server, so we can't do anything special
@@ -128,260 +125,320 @@ public class CoprocessorHConnection implements HConnection {
return ClientService.newBlockingStub(channel);
}
+ @Override
public void abort(String why, Throwable e) {
delegate.abort(why, e);
}
+ @Override
public boolean isAborted() {
return delegate.isAborted();
}
+ @Override
public Configuration getConfiguration() {
return delegate.getConfiguration();
}
+ @Override
public HTableInterface getTable(String tableName) throws IOException {
return delegate.getTable(tableName);
}
+ @Override
public HTableInterface getTable(byte[] tableName) throws IOException {
return delegate.getTable(tableName);
}
+ @Override
public HTableInterface getTable(TableName tableName) throws IOException {
return delegate.getTable(tableName);
}
+ @Override
public HTableInterface getTable(String tableName, ExecutorService pool) throws IOException {
return delegate.getTable(tableName, pool);
}
+ @Override
public HTableInterface getTable(byte[] tableName, ExecutorService pool) throws IOException {
return delegate.getTable(tableName, pool);
}
+ @Override
public HTableInterface getTable(TableName tableName, ExecutorService pool) throws IOException {
return delegate.getTable(tableName, pool);
}
+ @Override
public boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException {
return delegate.isMasterRunning();
}
+ @Override
public boolean isTableEnabled(TableName tableName) throws IOException {
return delegate.isTableEnabled(tableName);
}
+ @Override
public boolean isTableEnabled(byte[] tableName) throws IOException {
return delegate.isTableEnabled(tableName);
}
+ @Override
public boolean isTableDisabled(TableName tableName) throws IOException {
return delegate.isTableDisabled(tableName);
}
+ @Override
public boolean isTableDisabled(byte[] tableName) throws IOException {
return delegate.isTableDisabled(tableName);
}
+ @Override
public boolean isTableAvailable(TableName tableName) throws IOException {
return delegate.isTableAvailable(tableName);
}
+ @Override
public boolean isTableAvailable(byte[] tableName) throws IOException {
return delegate.isTableAvailable(tableName);
}
+ @Override
public boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException {
return delegate.isTableAvailable(tableName, splitKeys);
}
+ @Override
public boolean isTableAvailable(byte[] tableName, byte[][] splitKeys) throws IOException {
return delegate.isTableAvailable(tableName, splitKeys);
}
+ @Override
public HTableDescriptor[] listTables() throws IOException {
return delegate.listTables();
}
+ @Override
public String[] getTableNames() throws IOException {
return delegate.getTableNames();
}
+ @Override
public TableName[] listTableNames() throws IOException {
return delegate.listTableNames();
}
+ @Override
public HTableDescriptor getHTableDescriptor(TableName tableName) throws IOException {
return delegate.getHTableDescriptor(tableName);
}
+ @Override
public HTableDescriptor getHTableDescriptor(byte[] tableName) throws IOException {
return delegate.getHTableDescriptor(tableName);
}
+ @Override
public HRegionLocation locateRegion(TableName tableName, byte[] row) throws IOException {
return delegate.locateRegion(tableName, row);
}
+ @Override
public HRegionLocation locateRegion(byte[] tableName, byte[] row) throws IOException {
return delegate.locateRegion(tableName, row);
}
+ @Override
public void clearRegionCache() {
delegate.clearRegionCache();
}
+ @Override
public void clearRegionCache(TableName tableName) {
delegate.clearRegionCache(tableName);
}
+ @Override
public void clearRegionCache(byte[] tableName) {
delegate.clearRegionCache(tableName);
}
+ @Override
public HRegionLocation relocateRegion(TableName tableName, byte[] row) throws IOException {
return delegate.relocateRegion(tableName, row);
}
+ @Override
public HRegionLocation relocateRegion(byte[] tableName, byte[] row) throws IOException {
return delegate.relocateRegion(tableName, row);
}
- public void updateCachedLocations(TableName tableName, byte[] rowkey, Object exception,
- ServerName source) {
- delegate.updateCachedLocations(tableName, rowkey, exception, source);
+ @Override
+ public void updateCachedLocations(TableName tableName, byte[] regionName, byte[] rowkey,
+ Object exception, ServerName source) {
+ delegate.updateCachedLocations(tableName, regionName, rowkey, exception, source);
}
+ @Override
public void updateCachedLocations(TableName tableName, byte[] rowkey, Object exception,
HRegionLocation source) {
delegate.updateCachedLocations(tableName, rowkey, exception, source);
}
+ @Override
public void updateCachedLocations(byte[] tableName, byte[] rowkey, Object exception,
HRegionLocation source) {
delegate.updateCachedLocations(tableName, rowkey, exception, source);
}
+ @Override
public HRegionLocation locateRegion(byte[] regionName) throws IOException {
return delegate.locateRegion(regionName);
}
+ @Override
+ public List<HRegionLocation> locateRegions(TableName tableName) throws IOException {
+   return delegate.locateRegions(tableName);
+ }

- */
- private ConcurrentSkipListMap
+ */
+ private ConcurrentSkipListMap

diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
* <code>optional bool split = 6;</code>
*/
boolean getSplit();
+
+ // optional int32 replica_id = 7 [default = 0];
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ boolean hasReplicaId();
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ int getReplicaId();
}
/**
* Protobuf type {@code RegionInfo}
@@ -3782,6 +3792,11 @@ public final class HBaseProtos {
split_ = input.readBool();
break;
}
+ case 56: {
+ bitField0_ |= 0x00000040;
+ replicaId_ = input.readInt32();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -3924,6 +3939,22 @@ public final class HBaseProtos {
return split_;
}
+ // optional int32 replica_id = 7 [default = 0];
+ public static final int REPLICA_ID_FIELD_NUMBER = 7;
+ private int replicaId_;
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ public boolean hasReplicaId() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ public int getReplicaId() {
+ return replicaId_;
+ }
+
private void initFields() {
regionId_ = 0L;
tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
@@ -3931,6 +3962,7 @@ public final class HBaseProtos {
endKey_ = com.google.protobuf.ByteString.EMPTY;
offline_ = false;
split_ = false;
+ replicaId_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -3974,6 +4006,9 @@ public final class HBaseProtos {
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeBool(6, split_);
}
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ output.writeInt32(7, replicaId_);
+ }
getUnknownFields().writeTo(output);
}
@@ -4007,6 +4042,10 @@ public final class HBaseProtos {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(6, split_);
}
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(7, replicaId_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -4060,6 +4099,11 @@ public final class HBaseProtos {
result = result && (getSplit()
== other.getSplit());
}
+ result = result && (hasReplicaId() == other.hasReplicaId());
+ if (hasReplicaId()) {
+ result = result && (getReplicaId()
+ == other.getReplicaId());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -4097,6 +4141,10 @@ public final class HBaseProtos {
hash = (37 * hash) + SPLIT_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getSplit());
}
+ if (hasReplicaId()) {
+ hash = (37 * hash) + REPLICA_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getReplicaId();
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -4228,6 +4276,8 @@ public final class HBaseProtos {
bitField0_ = (bitField0_ & ~0x00000010);
split_ = false;
bitField0_ = (bitField0_ & ~0x00000020);
+ replicaId_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
@@ -4284,6 +4334,10 @@ public final class HBaseProtos {
to_bitField0_ |= 0x00000020;
}
result.split_ = split_;
+ if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+ to_bitField0_ |= 0x00000040;
+ }
+ result.replicaId_ = replicaId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -4318,6 +4372,9 @@ public final class HBaseProtos {
if (other.hasSplit()) {
setSplit(other.getSplit());
}
+ if (other.hasReplicaId()) {
+ setReplicaId(other.getReplicaId());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -4645,6 +4702,39 @@ public final class HBaseProtos {
return this;
}
+ // optional int32 replica_id = 7 [default = 0];
+ private int replicaId_ ;
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ public boolean hasReplicaId() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ public int getReplicaId() {
+ return replicaId_;
+ }
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ public Builder setReplicaId(int value) {
+ bitField0_ |= 0x00000040;
+ replicaId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int32 replica_id = 7 [default = 0];
+ */
+ public Builder clearReplicaId() {
+ bitField0_ = (bitField0_ & ~0x00000040);
+ replicaId_ = 0;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:RegionInfo)
}
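A quick exercise of the regenerated API, using only methods visible in this diff (hasReplicaId, getReplicaId, setReplicaId, clearReplicaId); the TableName values are placeholders, and imports of the generated classes plus com.google.protobuf.ByteString are assumed:

```java
// Usage sketch for the regenerated RegionInfo message.
HBaseProtos.RegionInfo.Builder b = HBaseProtos.RegionInfo.newBuilder()
    .setRegionId(1L)
    .setTableName(HBaseProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("t1")))
    .setReplicaId(1);
assert b.hasReplicaId() && b.getReplicaId() == 1;
b.clearReplicaId();          // back to the field's default
assert !b.hasReplicaId() && b.getReplicaId() == 0;
```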
@@ -15778,41 +15868,42 @@ public final class HBaseProtos {
"ingPair\"o\n\022ColumnFamilySchema\022\014\n\004name\030\001 " +
"\002(\014\022#\n\nattributes\030\002 \003(\0132\017.BytesBytesPair" +
"\022&\n\rconfiguration\030\003 \003(\0132\017.NameStringPair" +
- "\"\203\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002(\004\022\036\n\nta",
+ "\"\232\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002(\004\022\036\n\nta",
"ble_name\030\002 \002(\0132\n.TableName\022\021\n\tstart_key\030" +
"\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022" +
- "\r\n\005split\030\006 \001(\010\"1\n\014FavoredNodes\022!\n\014favore" +
- "d_node\030\001 \003(\0132\013.ServerName\"\225\001\n\017RegionSpec" +
- "ifier\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.Re" +
- "gionSpecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023Regi" +
- "onSpecifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCO" +
- "DED_REGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004from\030\001" +
- " \001(\004\022\n\n\002to\030\002 \001(\004\"A\n\nServerName\022\021\n\thost_n" +
- "ame\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 ",
- "\001(\004\"\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016Name" +
- "StringPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\"" +
- ",\n\rNameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030" +
- "\002 \001(\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016" +
- "\n\006second\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030" +
- "\001 \001(\t\022\r\n\005value\030\002 \001(\003\"\256\001\n\023SnapshotDescrip" +
- "tion\022\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcre" +
- "ation_time\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031.Snap" +
- "shotDescription.Type:\005FLUSH\022\017\n\007version\030\005" +
- " \001(\005\"\037\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\"}\n",
- "\024ProcedureDescription\022\021\n\tsignature\030\001 \002(\t" +
- "\022\020\n\010instance\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(" +
- "\003:\0010\022&\n\rconfiguration\030\004 \003(\0132\017.NameString" +
- "Pair\"\n\n\010EmptyMsg\"\033\n\007LongMsg\022\020\n\010long_msg\030" +
- "\001 \002(\003\"\'\n\rBigDecimalMsg\022\026\n\016bigdecimal_msg" +
- "\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig_bits\030\001 \002(\004\022\025" +
- "\n\rmost_sig_bits\030\002 \002(\004\"K\n\023NamespaceDescri" +
- "ptor\022\014\n\004name\030\001 \002(\014\022&\n\rconfiguration\030\002 \003(" +
- "\0132\017.NameStringPair\"$\n\020RegionServerInfo\022\020" +
- "\n\010infoPort\030\001 \001(\005*r\n\013CompareType\022\010\n\004LESS\020",
- "\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_E" +
- "QUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020" +
- "\005\022\t\n\005NO_OP\020\006B>\n*org.apache.hadoop.hbase." +
- "protobuf.generatedB\013HBaseProtosH\001\240\001\001"
+ "\r\n\005split\030\006 \001(\010\022\025\n\nreplica_id\030\007 \001(\005:\0010\"1\n" +
+ "\014FavoredNodes\022!\n\014favored_node\030\001 \003(\0132\013.Se" +
+ "rverName\"\225\001\n\017RegionSpecifier\0222\n\004type\030\001 \002" +
+ "(\0162$.RegionSpecifier.RegionSpecifierType" +
+ "\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpecifierType\022\017" +
+ "\n\013REGION_NAME\020\001\022\027\n\023ENCODED_REGION_NAME\020\002" +
+ "\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"" +
+ "A\n\nServerName\022\021\n\thost_name\030\001 \002(\t\022\014\n\004port",
+ "\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033\n\013Coprocesso" +
+ "r\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n\004nam" +
+ "e\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesPair\022" +
+ "\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016BytesByt" +
+ "esPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(\014\",\n" +
+ "\rNameInt64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 " +
+ "\001(\003\"\256\001\n\023SnapshotDescription\022\014\n\004name\030\001 \002(" +
+ "\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003:" +
+ "\0010\022.\n\004type\030\004 \001(\0162\031.SnapshotDescription.T" +
+ "ype:\005FLUSH\022\017\n\007version\030\005 \001(\005\"\037\n\004Type\022\014\n\010D",
+ "ISABLED\020\000\022\t\n\005FLUSH\020\001\"}\n\024ProcedureDescrip" +
+ "tion\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 \001(" +
+ "\t\022\030\n\rcreation_time\030\003 \001(\003:\0010\022&\n\rconfigura" +
+ "tion\030\004 \003(\0132\017.NameStringPair\"\n\n\010EmptyMsg\"" +
+ "\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\'\n\rBigDecim" +
+ "alMsg\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n" +
+ "\016least_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002" +
+ " \002(\004\"K\n\023NamespaceDescriptor\022\014\n\004name\030\001 \002(" +
+ "\014\022&\n\rconfiguration\030\002 \003(\0132\017.NameStringPai" +
+ "r\"$\n\020RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005*",
+ "r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUA" +
+ "L\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER" +
+ "_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006B>\n*o" +
+ "rg.apache.hadoop.hbase.protobuf.generate" +
+ "dB\013HBaseProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -15842,7 +15933,7 @@ public final class HBaseProtos {
internal_static_RegionInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionInfo_descriptor,
- new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", });
+ new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", "ReplicaId", });
internal_static_FavoredNodes_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_FavoredNodes_fieldAccessorTable = new
diff --git hbase-protocol/src/main/protobuf/HBase.proto hbase-protocol/src/main/protobuf/HBase.proto
index a966c40..4874fc2 100644
--- hbase-protocol/src/main/protobuf/HBase.proto
+++ hbase-protocol/src/main/protobuf/HBase.proto
@@ -64,6 +64,7 @@ message RegionInfo {
optional bytes end_key = 4;
optional bool offline = 5;
optional bool split = 6;
+ optional int32 replica_id = 7 [default = 0];
}
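Because the new field is optional with a default of 0, a RegionInfo serialized by code that predates this change still parses, and the reader sees replicaId 0, i.e. the default replica. A sketch of that compatibility property (placeholder TableName values; the checked InvalidProtocolBufferException is left to the caller):

```java
// Old writer: never sets replica_id (field 7).
byte[] oldBytes = HBaseProtos.RegionInfo.newBuilder()
    .setRegionId(1L)
    .setTableName(HBaseProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("t1")))
    .build().toByteArray();
// New reader: field absent, so the default replica id (0) is reported.
HBaseProtos.RegionInfo parsed = HBaseProtos.RegionInfo.parseFrom(oldBytes);
assert !parsed.hasReplicaId() && parsed.getReplicaId() == 0;
```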
/**
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
index 89fa0a9..5b5446b 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.catalog;
import java.io.IOException;
import java.io.InterruptedIOException;
-import java.net.ConnectException;
import java.util.ArrayList;
import java.util.List;
@@ -47,11 +46,10 @@ import com.google.protobuf.ServiceException;
/**
* Writes region and assignment information to hbase:meta.
- * TODO: Put MetaReader and MetaEditor together; doesn't make sense having
- * them distinct. see HBASE-3475.
*/
@InterfaceAudience.Private
-public class MetaEditor {
+public class MetaEditor extends MetaReader {
+
// TODO: Strip CatalogTracker from this class. Its all over and in the end
// its only used to get its Configuration so we can get associated
// Connection.
@@ -290,7 +288,7 @@ public class MetaEditor {
Put put = new Put(regionInfo.getRegionName());
addRegionInfo(put, regionInfo);
if (sn != null) {
- addLocation(put, sn, openSeqNum);
+ addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
}
putToMetaTable(catalogTracker, put);
LOG.info("Added daughter " + regionInfo.getEncodedName() +
@@ -327,7 +325,7 @@ public class MetaEditor {
Delete deleteB = makeDeleteFromRegionInfo(regionB);
// The merged is a new region, openSeqNum = 1 is fine.
- addLocation(putOfMerged, sn, 1);
+ addLocation(putOfMerged, sn, 1, mergedRegion.getReplicaId());
byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
+ HConstants.DELIMITER);
@@ -365,8 +363,8 @@ public class MetaEditor {
Put putA = makePutFromRegionInfo(splitA);
Put putB = makePutFromRegionInfo(splitB);
- addLocation(putA, sn, 1); //these are new regions, openSeqNum = 1 is fine.
- addLocation(putB, sn, 1);
+ addLocation(putA, sn, 1, splitA.getReplicaId()); //new regions, openSeqNum = 1 is fine.
+ addLocation(putB, sn, 1, splitB.getReplicaId());
byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
multiMutate(meta, tableRow, putParent, putA, putB);
@@ -401,29 +399,6 @@ public class MetaEditor {
}
}
-
- /**
- * Updates the location of the specified hbase:meta region in ROOT to be the
- * specified server hostname and startcode.
- *
- * Uses passed catalog tracker to get a connection to the server hosting
- * ROOT and makes edits to that region.
- *
- * @param catalogTracker catalog tracker
- * @param regionInfo region to update location of
- * @param sn Server name
- * @param openSeqNum the latest sequence number obtained when the region was open
- * @throws IOException
- * @throws ConnectException Usually because the regionserver carrying hbase:meta
- * is down.
- * @throws NullPointerException Because no -ROOT- server connection
- */
- public static void updateMetaLocation(CatalogTracker catalogTracker,
- HRegionInfo regionInfo, ServerName sn, long openSeqNum)
- throws IOException, ConnectException {
- updateLocation(catalogTracker, regionInfo, sn, openSeqNum);
- }
-
/**
* Updates the location of the specified region in hbase:meta to be the specified
* server hostname and startcode.
@@ -458,8 +433,9 @@ public class MetaEditor {
private static void updateLocation(final CatalogTracker catalogTracker,
HRegionInfo regionInfo, ServerName sn, long openSeqNum)
throws IOException {
- Put put = new Put(regionInfo.getRegionName());
- addLocation(put, sn, openSeqNum);
+ // region replicas are kept in the primary region's row
+ Put put = new Put(getMetaKeyForRegion(regionInfo));
+ addLocation(put, sn, openSeqNum, regionInfo.getReplicaId());
putToCatalogTable(catalogTracker, put);
LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
" with server=" + sn);
@@ -568,12 +544,12 @@ public class MetaEditor {
return p;
}
- private static Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
- p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
+ private static Put addLocation(final Put p, final ServerName sn, long openSeqNum, int replicaId){
+ p.addImmutable(HConstants.CATALOG_FAMILY, MetaReader.getServerColumn(replicaId),
Bytes.toBytes(sn.getHostAndPort()));
- p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
+ p.addImmutable(HConstants.CATALOG_FAMILY, MetaReader.getStartCodeColumn(replicaId),
Bytes.toBytes(sn.getStartcode()));
- p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER,
+ p.addImmutable(HConstants.CATALOG_FAMILY, MetaReader.getSeqNumColumn(replicaId),
Bytes.toBytes(openSeqNum));
return p;
}
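Net effect of the updateLocation/addLocation changes above: every replica of a range writes into the single meta row keyed by the default replica's region name, each under its own qualifiers. What the resulting Put for replica 1 would plausibly contain, assuming getMetaKeyForRegion() resolves to the default replica's name and the qualifier suffix scheme sketched earlier; defaultReplica, sn and openSeqNum are assumed locals:

```java
// Sketch of the meta row update for replica 1; suffixes are an assumption.
Put put = new Put(defaultReplica.getRegionName());  // one meta row per range
put.addImmutable(HConstants.CATALOG_FAMILY, Bytes.toBytes("server_0001"),
    Bytes.toBytes(sn.getHostAndPort()));
put.addImmutable(HConstants.CATALOG_FAMILY, Bytes.toBytes("serverstartcode_0001"),
    Bytes.toBytes(sn.getStartcode()));
put.addImmutable(HConstants.CATALOG_FAMILY, Bytes.toBytes("seqnumDuringOpen_0001"),
    Bytes.toBytes(openSeqNum));
```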