done) {
channel.callMethod(
- getDescriptor().getMethods().get(11),
+ getDescriptor().getMethods().get(12),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(),
@@ -16083,6 +17138,11 @@
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest request)
throws com.google.protobuf.ServiceException;
+ public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse mergeRegions(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest request)
+ throws com.google.protobuf.ServiceException;
+
public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse replicateWALEntry(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request)
@@ -16207,12 +17267,24 @@
}
+ public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse mergeRegions(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(8),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance());
+ }
+
+
public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse replicateWALEntry(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(8),
+ getDescriptor().getMethods().get(9),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance());
@@ -16224,7 +17296,7 @@
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(9),
+ getDescriptor().getMethods().get(10),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance());
@@ -16236,7 +17308,7 @@
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(10),
+ getDescriptor().getMethods().get(11),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance());
@@ -16248,7 +17320,7 @@
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(11),
+ getDescriptor().getMethods().get(12),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance());
@@ -16343,6 +17415,16 @@
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_CompactRegionResponse_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_MergeRegionsRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_MergeRegionsRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_MergeRegionsResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_MergeRegionsResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_UUID_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -16455,49 +17537,54 @@
"tRegionResponse\"W\n\024CompactRegionRequest\022" +
" \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\r\n\005maj" +
"or\030\002 \001(\010\022\016\n\006family\030\003 \001(\014\"\027\n\025CompactRegio" +
- "nResponse\"1\n\004UUID\022\024\n\014leastSigBits\030\001 \002(\004\022" +
- "\023\n\013mostSigBits\030\002 \002(\004\"\270\003\n\010WALEntry\022\035\n\003key" +
- "\030\001 \002(\0132\020.WALEntry.WALKey\022\037\n\004edit\030\002 \002(\0132\021" +
- ".WALEntry.WALEdit\032~\n\006WALKey\022\031\n\021encodedRe" +
- "gionName\030\001 \002(\014\022\021\n\ttableName\030\002 \002(\014\022\031\n\021log" +
- "SequenceNumber\030\003 \002(\004\022\021\n\twriteTime\030\004 \002(\004\022",
- "\030\n\tclusterId\030\005 \001(\0132\005.UUID\032\353\001\n\007WALEdit\022\025\n" +
- "\rkeyValueBytes\030\001 \003(\014\0222\n\013familyScope\030\002 \003(" +
- "\0132\035.WALEntry.WALEdit.FamilyScope\032M\n\013Fami" +
- "lyScope\022\016\n\006family\030\001 \002(\014\022.\n\tscopeType\030\002 \002" +
- "(\0162\033.WALEntry.WALEdit.ScopeType\"F\n\tScope" +
- "Type\022\033\n\027REPLICATION_SCOPE_LOCAL\020\000\022\034\n\030REP" +
- "LICATION_SCOPE_GLOBAL\020\001\"4\n\030ReplicateWALE" +
- "ntryRequest\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"\033\n" +
- "\031ReplicateWALEntryResponse\"\026\n\024RollWALWri" +
- "terRequest\".\n\025RollWALWriterResponse\022\025\n\rr",
- "egionToFlush\030\001 \003(\014\"#\n\021StopServerRequest\022" +
- "\016\n\006reason\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n" +
- "\024GetServerInfoRequest\"@\n\nServerInfo\022\037\n\ns" +
- "erverName\030\001 \002(\0132\013.ServerName\022\021\n\twebuiPor" +
- "t\030\002 \001(\r\"8\n\025GetServerInfoResponse\022\037\n\nserv" +
- "erInfo\030\001 \002(\0132\013.ServerInfo2\371\005\n\014AdminServi" +
- "ce\022>\n\rgetRegionInfo\022\025.GetRegionInfoReque" +
- "st\032\026.GetRegionInfoResponse\022;\n\014getStoreFi" +
- "le\022\024.GetStoreFileRequest\032\025.GetStoreFileR" +
- "esponse\022D\n\017getOnlineRegion\022\027.GetOnlineRe",
- "gionRequest\032\030.GetOnlineRegionResponse\0225\n" +
- "\nopenRegion\022\022.OpenRegionRequest\032\023.OpenRe" +
- "gionResponse\0228\n\013closeRegion\022\023.CloseRegio" +
- "nRequest\032\024.CloseRegionResponse\0228\n\013flushR" +
- "egion\022\023.FlushRegionRequest\032\024.FlushRegion" +
- "Response\0228\n\013splitRegion\022\023.SplitRegionReq" +
- "uest\032\024.SplitRegionResponse\022>\n\rcompactReg" +
- "ion\022\025.CompactRegionRequest\032\026.CompactRegi" +
- "onResponse\022J\n\021replicateWALEntry\022\031.Replic" +
- "ateWALEntryRequest\032\032.ReplicateWALEntryRe",
- "sponse\022>\n\rrollWALWriter\022\025.RollWALWriterR" +
- "equest\032\026.RollWALWriterResponse\022>\n\rgetSer" +
- "verInfo\022\025.GetServerInfoRequest\032\026.GetServ" +
- "erInfoResponse\0225\n\nstopServer\022\022.StopServe" +
- "rRequest\032\023.StopServerResponseBA\n*org.apa" +
- "che.hadoop.hbase.protobuf.generatedB\013Adm" +
- "inProtosH\001\210\001\001\240\001\001"
+ "nResponse\"t\n\023MergeRegionsRequest\022!\n\007regi" +
+ "onA\030\001 \002(\0132\020.RegionSpecifier\022!\n\007regionB\030\002" +
+ " \002(\0132\020.RegionSpecifier\022\027\n\010forcible\030\003 \001(\010" +
+ ":\005false\"\026\n\024MergeRegionsResponse\"1\n\004UUID\022" +
+ "\024\n\014leastSigBits\030\001 \002(\004\022\023\n\013mostSigBits\030\002 \002" +
+ "(\004\"\270\003\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020.WALEntry.",
+ "WALKey\022\037\n\004edit\030\002 \002(\0132\021.WALEntry.WALEdit\032" +
+ "~\n\006WALKey\022\031\n\021encodedRegionName\030\001 \002(\014\022\021\n\t" +
+ "tableName\030\002 \002(\014\022\031\n\021logSequenceNumber\030\003 \002" +
+ "(\004\022\021\n\twriteTime\030\004 \002(\004\022\030\n\tclusterId\030\005 \001(\013" +
+ "2\005.UUID\032\353\001\n\007WALEdit\022\025\n\rkeyValueBytes\030\001 \003" +
+ "(\014\0222\n\013familyScope\030\002 \003(\0132\035.WALEntry.WALEd" +
+ "it.FamilyScope\032M\n\013FamilyScope\022\016\n\006family\030" +
+ "\001 \002(\014\022.\n\tscopeType\030\002 \002(\0162\033.WALEntry.WALE" +
+ "dit.ScopeType\"F\n\tScopeType\022\033\n\027REPLICATIO" +
+ "N_SCOPE_LOCAL\020\000\022\034\n\030REPLICATION_SCOPE_GLO",
+ "BAL\020\001\"4\n\030ReplicateWALEntryRequest\022\030\n\005ent" +
+ "ry\030\001 \003(\0132\t.WALEntry\"\033\n\031ReplicateWALEntry" +
+ "Response\"\026\n\024RollWALWriterRequest\".\n\025Roll" +
+ "WALWriterResponse\022\025\n\rregionToFlush\030\001 \003(\014" +
+ "\"#\n\021StopServerRequest\022\016\n\006reason\030\001 \002(\t\"\024\n" +
+ "\022StopServerResponse\"\026\n\024GetServerInfoRequ" +
+ "est\"@\n\nServerInfo\022\037\n\nserverName\030\001 \002(\0132\013." +
+ "ServerName\022\021\n\twebuiPort\030\002 \001(\r\"8\n\025GetServ" +
+ "erInfoResponse\022\037\n\nserverInfo\030\001 \002(\0132\013.Ser" +
+ "verInfo2\266\006\n\014AdminService\022>\n\rgetRegionInf",
+ "o\022\025.GetRegionInfoRequest\032\026.GetRegionInfo" +
+ "Response\022;\n\014getStoreFile\022\024.GetStoreFileR" +
+ "equest\032\025.GetStoreFileResponse\022D\n\017getOnli" +
+ "neRegion\022\027.GetOnlineRegionRequest\032\030.GetO" +
+ "nlineRegionResponse\0225\n\nopenRegion\022\022.Open" +
+ "RegionRequest\032\023.OpenRegionResponse\0228\n\013cl" +
+ "oseRegion\022\023.CloseRegionRequest\032\024.CloseRe" +
+ "gionResponse\0228\n\013flushRegion\022\023.FlushRegio" +
+ "nRequest\032\024.FlushRegionResponse\0228\n\013splitR" +
+ "egion\022\023.SplitRegionRequest\032\024.SplitRegion",
+ "Response\022>\n\rcompactRegion\022\025.CompactRegio" +
+ "nRequest\032\026.CompactRegionResponse\022;\n\014merg" +
+ "eRegions\022\024.MergeRegionsRequest\032\025.MergeRe" +
+ "gionsResponse\022J\n\021replicateWALEntry\022\031.Rep" +
+ "licateWALEntryRequest\032\032.ReplicateWALEntr" +
+ "yResponse\022>\n\rrollWALWriter\022\025.RollWALWrit" +
+ "erRequest\032\026.RollWALWriterResponse\022>\n\rget" +
+ "ServerInfo\022\025.GetServerInfoRequest\032\026.GetS" +
+ "erverInfoResponse\0225\n\nstopServer\022\022.StopSe" +
+ "rverRequest\032\023.StopServerResponseBA\n*org.",
+ "apache.hadoop.hbase.protobuf.generatedB\013" +
+ "AdminProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -16640,8 +17727,24 @@
new java.lang.String[] { },
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.class,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.Builder.class);
+ internal_static_MergeRegionsRequest_descriptor =
+ getDescriptor().getMessageTypes().get(16);
+ internal_static_MergeRegionsRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_MergeRegionsRequest_descriptor,
+ new java.lang.String[] { "RegionA", "RegionB", "Forcible", },
+ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.class,
+ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.Builder.class);
+ internal_static_MergeRegionsResponse_descriptor =
+ getDescriptor().getMessageTypes().get(17);
+ internal_static_MergeRegionsResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_MergeRegionsResponse_descriptor,
+ new java.lang.String[] { },
+ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.Builder.class);
internal_static_UUID_descriptor =
- getDescriptor().getMessageTypes().get(16);
+ getDescriptor().getMessageTypes().get(18);
internal_static_UUID_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_UUID_descriptor,
@@ -16649,7 +17752,7 @@
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UUID.class,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UUID.Builder.class);
internal_static_WALEntry_descriptor =
- getDescriptor().getMessageTypes().get(17);
+ getDescriptor().getMessageTypes().get(19);
internal_static_WALEntry_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_WALEntry_descriptor,
@@ -16681,7 +17784,7 @@
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit.FamilyScope.class,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.WALEdit.FamilyScope.Builder.class);
internal_static_ReplicateWALEntryRequest_descriptor =
- getDescriptor().getMessageTypes().get(18);
+ getDescriptor().getMessageTypes().get(20);
internal_static_ReplicateWALEntryRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ReplicateWALEntryRequest_descriptor,
@@ -16689,7 +17792,7 @@
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.class,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.Builder.class);
internal_static_ReplicateWALEntryResponse_descriptor =
- getDescriptor().getMessageTypes().get(19);
+ getDescriptor().getMessageTypes().get(21);
internal_static_ReplicateWALEntryResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ReplicateWALEntryResponse_descriptor,
@@ -16697,7 +17800,7 @@
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.class,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.Builder.class);
internal_static_RollWALWriterRequest_descriptor =
- getDescriptor().getMessageTypes().get(20);
+ getDescriptor().getMessageTypes().get(22);
internal_static_RollWALWriterRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RollWALWriterRequest_descriptor,
@@ -16705,7 +17808,7 @@
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.class,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.Builder.class);
internal_static_RollWALWriterResponse_descriptor =
- getDescriptor().getMessageTypes().get(21);
+ getDescriptor().getMessageTypes().get(23);
internal_static_RollWALWriterResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RollWALWriterResponse_descriptor,
@@ -16713,7 +17816,7 @@
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.class,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.Builder.class);
internal_static_StopServerRequest_descriptor =
- getDescriptor().getMessageTypes().get(22);
+ getDescriptor().getMessageTypes().get(24);
internal_static_StopServerRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_StopServerRequest_descriptor,
@@ -16721,7 +17824,7 @@
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.class,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.Builder.class);
internal_static_StopServerResponse_descriptor =
- getDescriptor().getMessageTypes().get(23);
+ getDescriptor().getMessageTypes().get(25);
internal_static_StopServerResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_StopServerResponse_descriptor,
@@ -16729,7 +17832,7 @@
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.class,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.Builder.class);
internal_static_GetServerInfoRequest_descriptor =
- getDescriptor().getMessageTypes().get(24);
+ getDescriptor().getMessageTypes().get(26);
internal_static_GetServerInfoRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_GetServerInfoRequest_descriptor,
@@ -16737,7 +17840,7 @@
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.class,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.Builder.class);
internal_static_ServerInfo_descriptor =
- getDescriptor().getMessageTypes().get(25);
+ getDescriptor().getMessageTypes().get(27);
internal_static_ServerInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ServerInfo_descriptor,
@@ -16745,7 +17848,7 @@
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo.class,
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo.Builder.class);
internal_static_GetServerInfoResponse_descriptor =
- getDescriptor().getMessageTypes().get(26);
+ getDescriptor().getMessageTypes().get(28);
internal_static_GetServerInfoResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_GetServerInfoResponse_descriptor,
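For reviewers, a minimal caller sketch (not part of the patch) showing how the
new blocking mergeRegions stub above would be invoked, assuming an
already-established com.google.protobuf.BlockingRpcChannel to the region
server; the helper class and parameter names are illustrative.

import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.ByteString;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;

final class MergeRegionsStubExample {
  static AdminProtos.MergeRegionsResponse callMerge(BlockingRpcChannel channel,
      byte[] encodedNameA, byte[] encodedNameB) throws ServiceException {
    RegionSpecifier regionA = RegionSpecifier.newBuilder()
        .setType(RegionSpecifierType.ENCODED_REGION_NAME)
        .setValue(ByteString.copyFrom(encodedNameA)).build();
    RegionSpecifier regionB = RegionSpecifier.newBuilder()
        .setType(RegionSpecifierType.ENCODED_REGION_NAME)
        .setValue(ByteString.copyFrom(encodedNameB)).build();
    AdminProtos.MergeRegionsRequest request =
        AdminProtos.MergeRegionsRequest.newBuilder()
            .setRegionA(regionA)
            .setRegionB(regionB)
            .setForcible(false) // optional field; defaults to false
            .build();
    // Dispatches over method index 8, matching the descriptor wiring above.
    return AdminProtos.AdminService.newBlockingStub(channel)
        .mergeRegions(null, request);
  }
}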
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (revision 1460302)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (working copy)
@@ -669,6 +669,36 @@
return sendRegionClose(server, region, versionOfClosingNode, null, true);
}
+ /**
+ * Sends a MERGE REGIONS RPC to the specified server to merge the specified
+ * regions.
+ *
+ * A region server could reject the merge request, for example if it does not
+ * host the specified regions.
+ * @param server server to merge the regions on
+ * @param region_a first region to merge
+ * @param region_b second region to merge
+ * @param forcible true for a compulsory merge; otherwise only two adjacent
+ * regions will be merged
+ * @throws IOException
+ */
+ public void sendRegionsMerge(ServerName server, HRegionInfo region_a,
+ HRegionInfo region_b, boolean forcible) throws IOException {
+ if (server == null)
+ throw new NullPointerException("Passed server is null");
+ if (region_a == null || region_b == null)
+ throw new NullPointerException("Passed region is null");
+ AdminProtocol admin = getServerConnection(server);
+ if (admin == null) {
+ throw new IOException("Attempting to send MERGE REGIONS RPC to server "
+ + server.toString() + " for regions "
+ + region_a.getRegionNameAsString() + ", "
+ + region_b.getRegionNameAsString()
+ + " failed because no RPC connection found to this server");
+ }
+ ProtobufUtil.mergeRegions(admin, region_a, region_b, forcible);
+ }
+
/**
* @param sn
* @return
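A master-side caller sketch (illustrative, not part of the patch) for the new
sendRegionsMerge(...) API; it assumes both regions are already hosted on the
same region server, which the dispatch logic is expected to guarantee.

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.ServerManager;

final class SendRegionsMergeExample {
  static void mergeOnSameServer(ServerManager serverManager, ServerName server,
      HRegionInfo regionA, HRegionInfo regionB) throws IOException {
    // forcible=false: the region server will only merge adjacent regions.
    serverManager.sendRegionsMerge(server, regionA, regionB, false);
  }
}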
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (revision 1460302)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (working copy)
@@ -39,36 +39,36 @@
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
-import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Chore;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
import org.apache.hadoop.hbase.RegionTransition;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException;
-import org.apache.hadoop.hbase.exceptions.TableNotFoundException;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
+import org.apache.hadoop.hbase.exceptions.RegionAlreadyInTransitionException;
+import org.apache.hadoop.hbase.exceptions.RegionServerStoppedException;
+import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.exceptions.TableNotFoundException;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler;
import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
+import org.apache.hadoop.hbase.master.handler.MergedRegionHandler;
import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
import org.apache.hadoop.hbase.master.handler.SplitRegionHandler;
-import org.apache.hadoop.hbase.exceptions.RegionAlreadyInTransitionException;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
-import org.apache.hadoop.hbase.exceptions.RegionServerStoppedException;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.KeyLocker;
import org.apache.hadoop.hbase.util.Pair;
@@ -85,6 +85,7 @@
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.data.Stat;
+import com.google.common.base.Preconditions;
import com.google.common.collect.LinkedHashMultimap;
/**
@@ -623,6 +624,24 @@
// multiple times so if it's still up we will receive an update soon.
}
break;
+ case RS_ZK_REGION_MERGING:
+ // nothing to do
+ LOG.info("Processed region " + regionInfo.getEncodedName()
+ + " in state : " + et + " nothing to do.");
+ break;
+ case RS_ZK_REGION_MERGE:
+ if (!serverManager.isServerOnline(sn)) {
+ // ServerShutdownHandler would handle this region
+ LOG.warn("Processed region " + regionInfo.getEncodedName()
+ + " in state : " + et + " on a dead regionserver: " + sn
+ + " doing nothing");
+ } else {
+ LOG.info("Processed region " + regionInfo.getEncodedName() + " in state : " +
+ et + " nothing to do.");
+ // We don't do anything. The regionserver is supposed to update the znode
+ // multiple times so if it's still up we will receive an update soon.
+ }
+ break;
default:
throw new IllegalStateException("Received region in state :" + et + " is not valid.");
}
@@ -783,6 +802,34 @@
regionState.getRegion(), sn, daughters));
break;
+ case RS_ZK_REGION_MERGING:
+ // The merged region is a new region, so we can't find it in the region
+ // states yet. Do nothing.
+ break;
+
+ case RS_ZK_REGION_MERGE:
+ // Assert that we can get a serverinfo for this server.
+ if (!this.serverManager.isServerOnline(sn)) {
+ LOG.error("Dropped merge! ServerName=" + sn + " unknown.");
+ break;
+ }
+ // Get merged and merging regions.
+ byte[] payloadOfMerge = rt.getPayload();
+ List<HRegionInfo> mergeRegions;
+ try {
+ mergeRegions = HRegionInfo.parseDelimitedFrom(payloadOfMerge, 0,
+ payloadOfMerge.length);
+ } catch (IOException e) {
+ LOG.error("Dropped merge! Failed reading merge payload for " +
+ prettyPrintedRegionName);
+ break;
+ }
+ assert mergeRegions.size() == 3;
+ // Run handler to do the rest of the MERGE handling.
+ this.executorService.submit(new MergedRegionHandler(server, this, sn,
+ mergeRegions));
+ break;
+
case M_ZK_REGION_CLOSING:
// Should see CLOSING after we have asked it to CLOSE or additional
// times after already being in state of CLOSING
@@ -2056,9 +2103,9 @@
NodeExistsException nee = (NodeExistsException)e;
String path = nee.getPath();
try {
- if (isSplitOrSplitting(path)) {
- LOG.debug(path + " is SPLIT or SPLITTING; " +
- "skipping unassign because region no longer exists -- its split");
+ if (isSplitOrSplittingOrMergeOrMerging(path)) {
+ LOG.debug(path + " is SPLIT or SPLITTING or MERGE or MERGING; " +
+ "skipping unassign because region no longer exists -- it's split or merged");
return;
}
} catch (KeeperException.NoNodeException ke) {
@@ -2136,21 +2183,23 @@
/**
* @param path
- * @return True if znode is in SPLIT or SPLITTING state.
+ * @return True if znode is in SPLIT or SPLITTING or MERGE or MERGING state.
* @throws KeeperException Can happen if the znode went away in meantime.
* @throws DeserializationException
*/
- private boolean isSplitOrSplitting(final String path)
+ private boolean isSplitOrSplittingOrMergeOrMerging(final String path)
throws KeeperException, DeserializationException {
boolean result = false;
- // This may fail if the SPLIT or SPLITTING znode gets cleaned up before we
- // can get data from it.
+ // This may fail if the SPLIT or SPLITTING or MERGE or MERGING znode gets
+ // cleaned up before we can get data from it.
byte [] data = ZKAssign.getData(watcher, path);
if (data == null) return false;
RegionTransition rt = RegionTransition.parseFrom(data);
switch (rt.getEventType()) {
case RS_ZK_REGION_SPLIT:
case RS_ZK_REGION_SPLITTING:
+ case RS_ZK_REGION_MERGE:
+ case RS_ZK_REGION_MERGING:
result = true;
break;
default:
@@ -2898,9 +2947,31 @@
}
/**
+ * Update in-memory structures.
+ * @param sn Server that reported the merge
+ * @param merged regioninfo of the merged region
+ * @param a region a
+ * @param b region b
+ */
+ public void handleRegionsMergeReport(final ServerName sn,
+ final HRegionInfo merged, final HRegionInfo a, final HRegionInfo b) {
+ regionOffline(a);
+ regionOffline(b);
+ regionOnline(merged, sn);
+
+ // There's a possibility that the regions were merging while a user asked
+ // the master to disable the table; we need to make sure we close the merged
+ // region in that case. This is not racing with the region server itself,
+ // since the RS report is done after the regions-merge transaction completes.
+ if (this.zkTable.isDisablingOrDisabledTable(merged.getTableNameAsString())) {
+ unassign(merged);
+ }
+ }
+
+ /**
* @param plan Plan to execute.
*/
- void balance(final RegionPlan plan) {
+ public void balance(final RegionPlan plan) {
synchronized (this.regionPlans) {
this.regionPlans.put(plan.getRegionName(), plan);
}
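A decoding sketch (illustrative) for the RS_ZK_REGION_MERGE znode payload
handled above: the payload is assumed to carry three delimited HRegionInfos,
which the handler asserts before submitting the MergedRegionHandler.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;

final class MergePayloadExample {
  static List<HRegionInfo> decode(byte[] payloadOfMerge) throws IOException {
    List<HRegionInfo> mergeRegions = HRegionInfo.parseDelimitedFrom(
        payloadOfMerge, 0, payloadOfMerge.length);
    assert mergeRegions.size() == 3; // merged region plus the two parents
    return mergeRegions;
  }
}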
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 1460277)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (working copy)
@@ -55,35 +55,21 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Chore;
-import org.apache.hadoop.hbase.exceptions.ClockOutOfSyncException;
-import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HealthCheckChore;
import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
-import org.apache.hadoop.hbase.exceptions.LeaseException;
-import org.apache.hadoop.hbase.exceptions.NoSuchColumnFamilyException;
-import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
-import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
-import org.apache.hadoop.hbase.exceptions.RegionAlreadyInTransitionException;
-import org.apache.hadoop.hbase.exceptions.RegionMovedException;
-import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
import org.apache.hadoop.hbase.RegionServerStatusProtocol;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.exceptions.RegionServerRunningException;
-import org.apache.hadoop.hbase.exceptions.RegionServerStoppedException;
-import org.apache.hadoop.hbase.exceptions.UnknownScannerException;
-import org.apache.hadoop.hbase.exceptions.YouAreDeadException;
import org.apache.hadoop.hbase.ZNodeClearer;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaEditor;
@@ -101,6 +87,21 @@
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.exceptions.ClockOutOfSyncException;
+import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
+import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
+import org.apache.hadoop.hbase.exceptions.LeaseException;
+import org.apache.hadoop.hbase.exceptions.NoSuchColumnFamilyException;
+import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
+import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
+import org.apache.hadoop.hbase.exceptions.RegionAlreadyInTransitionException;
+import org.apache.hadoop.hbase.exceptions.RegionMovedException;
+import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
+import org.apache.hadoop.hbase.exceptions.RegionServerRunningException;
+import org.apache.hadoop.hbase.exceptions.RegionServerStoppedException;
+import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.exceptions.UnknownScannerException;
+import org.apache.hadoop.hbase.exceptions.YouAreDeadException;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
@@ -114,7 +115,6 @@
import org.apache.hadoop.hbase.ipc.ProtobufRpcClientEngine;
import org.apache.hadoop.hbase.ipc.RpcClientEngine;
import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.master.TableLockManager;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -135,6 +135,8 @@
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
@@ -203,11 +205,11 @@
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.metrics.util.MBeanUtil;
import org.apache.hadoop.net.DNS;
@@ -3503,6 +3505,35 @@
}
/**
+ * Merge regions on the region server.
+ *
+ * @param controller the RPC controller
+ * @param request the request
+ * @return merge regions response
+ * @throws ServiceException
+ */
+ @Override
+ @QosPriority(priority = HConstants.HIGH_QOS)
+ public MergeRegionsResponse mergeRegions(final RpcController controller,
+ final MergeRegionsRequest request) throws ServiceException {
+ try {
+ checkOpen();
+ requestCount.increment();
+ HRegion regionA = getRegion(request.getRegionA());
+ HRegion regionB = getRegion(request.getRegionB());
+ boolean forcible = request.getForcible();
+ LOG.info("Receiving merging request for " + regionA + ", " + regionB
+ + ",forcible=" + forcible);
+ regionA.flushcache();
+ regionB.flushcache();
+ compactSplitThread.requestRegionsMerge(regionA, regionB, forcible);
+ return MergeRegionsResponse.newBuilder().build();
+ } catch (IOException ie) {
+ throw new ServiceException(ie);
+ }
+ }
+
+ /**
* Compact a region on the region server.
*
* @param controller the RPC controller
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (revision 1460277)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (working copy)
@@ -65,7 +65,7 @@
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.Triple;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -325,6 +325,11 @@
public TableLockManager getTableLockManager() {
return null;
}
+
+ @Override
+ public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b,
+ boolean forcible) throws IOException {
+ }
}
@Test
@@ -546,9 +551,11 @@
splita.setOffline(true); //simulate that splita goes offline when it is split
splitParents.put(splita, createResult(splita, splitaa,splitab));
+ final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
CatalogJanitor janitor = spy(new CatalogJanitor(server, services));
- doReturn(new Pair<Integer, Map<HRegionInfo, Result>>(
- 10, splitParents)).when(janitor).getSplitParents();
+ doReturn(new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
+ 10, mergedRegions, splitParents)).when(janitor)
+ .getMergedRegionsAndSplitParents();
//create ref from splita to parent
Path splitaRef =
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (revision 1460277)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (working copy)
@@ -18,8 +18,18 @@
*/
package org.apache.hadoop.hbase.client;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.ServiceException;
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.net.SocketTimeoutException;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.regex.Pattern;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -78,6 +88,7 @@
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse;
@@ -109,17 +120,8 @@
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.KeeperException;
-import java.io.Closeable;
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.net.SocketTimeoutException;
-import java.util.Arrays;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.regex.Pattern;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.ServiceException;
/**
* Provides an interface to manage HBase database table metadata + general
@@ -1690,6 +1692,38 @@
}
/**
+ * Merge two regions. Asynchronous operation.
+ * @param encodedNameOfRegionA encoded name of region a
+ * @param encodedNameOfRegionB encoded name of region b
+ * @param forcible true for a compulsory merge; otherwise only two adjacent
+ * regions will be merged
+ * @throws IOException
+ */
+ public void mergeRegions(final byte[] encodedNameOfRegionA,
+ final byte[] encodedNameOfRegionB, final boolean forcible)
+ throws IOException {
+ MasterAdminKeepAliveConnection master = connection
+ .getKeepAliveMasterAdmin();
+ try {
+ DispatchMergingRegionsRequest request = RequestConverter
+ .buildDispatchMergingRegionsRequest(encodedNameOfRegionA,
+ encodedNameOfRegionB, forcible);
+ master.dispatchMergingRegions(null, request);
+ } catch (ServiceException se) {
+ IOException ioe = ProtobufUtil.getRemoteException(se);
+ if (ioe instanceof UnknownRegionException) {
+ throw (UnknownRegionException) ioe;
+ }
+ LOG.error("Unexpected exception: " + se
+ + " from calling HMaster.dispatchMergingRegions");
+ } catch (DeserializationException de) {
+ LOG.error("Could not parse destination server name: " + de);
+ } finally {
+ master.close();
+ }
+ }
+
+ /**
* Split a table or an individual region.
* Asynchronous operation.
*
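A minimal end-user sketch (not part of the patch) of the new asynchronous
client API; the encoded region names are placeholders, and returning from
mergeRegions does not mean the merge has completed.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public final class MergeRegionsClientExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Ask the master to dispatch a merge of two adjacent regions.
      admin.mergeRegions(Bytes.toBytes("encodedNameOfRegionA"),
          Bytes.toBytes("encodedNameOfRegionB"), false);
    } finally {
      admin.close();
    }
  }
}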
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (revision 1460277)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (working copy)
@@ -57,6 +57,7 @@
private final ThreadPoolExecutor largeCompactions;
private final ThreadPoolExecutor smallCompactions;
private final ThreadPoolExecutor splits;
+ private final ThreadPoolExecutor mergePool;
/**
* Splitting should not take place if the total number of regions exceed this.
@@ -118,6 +119,16 @@
return t;
}
});
+ int mergeThreads = conf.getInt("hbase.regionserver.thread.merge", 1);
+ this.mergePool = (ThreadPoolExecutor) Executors.newFixedThreadPool(
+ mergeThreads, new ThreadFactory() {
+ @Override
+ public Thread newThread(Runnable r) {
+ Thread t = new Thread(r);
+ t.setName(n + "-merges-" + System.currentTimeMillis());
+ return t;
+ }
+ });
}
@Override
@@ -125,7 +136,8 @@
return "compaction_queue=("
+ largeCompactions.getQueue().size() + ":"
+ smallCompactions.getQueue().size() + ")"
- + ", split_queue=" + splits.getQueue().size();
+ + ", split_queue=" + splits.getQueue().size()
+ + ", merge_queue=" + mergePool.getQueue().size();
}
public String dumpQueue() {
@@ -159,9 +171,32 @@
queueLists.append("\n");
}
+ queueLists.append("\n");
+ queueLists.append(" Region Merge Queue:\n");
+ lq = mergePool.getQueue();
+ it = lq.iterator();
+ while (it.hasNext()) {
+ queueLists.append(" " + it.next().toString());
+ queueLists.append("\n");
+ }
+
return queueLists.toString();
}
+ public synchronized void requestRegionsMerge(final HRegion a,
+ final HRegion b, final boolean forcible) {
+ try {
+ mergePool.execute(new RegionMergeRequest(a, b, this.server, forcible));
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Region merge requested for " + a + "," + b + ", forcible="
+ + forcible + ". " + this);
+ }
+ } catch (RejectedExecutionException ree) {
+ LOG.warn("Could not execute merge for " + a + "," + b + ", forcible="
+ + forcible, ree);
+ }
+ }
+
public synchronized boolean requestSplit(final HRegion r) {
// don't split regions that are blocking
if (shouldSplitRegion() && r.getCompactPriority() >= Store.PRIORITY_USER) {
@@ -270,6 +305,7 @@
*/
void interruptIfNecessary() {
splits.shutdown();
+ mergePool.shutdown();
largeCompactions.shutdown();
smallCompactions.shutdown();
}
@@ -291,6 +327,7 @@
void join() {
waitFor(splits, "Split Thread");
+ waitFor(mergePool, "Merge Thread");
waitFor(largeCompactions, "Large Compaction Thread");
waitFor(smallCompactions, "Small Compaction Thread");
}
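A configuration sketch for the new merge pool, assuming only the default
introduced by this patch: "hbase.regionserver.thread.merge" defaults to 1
thread, so a deployment expecting concurrent merges would raise it before
starting the region server.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class MergeThreadConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.thread.merge", 2); // allow two concurrent merges
    System.out.println(conf.getInt("hbase.regionserver.thread.merge", 1));
  }
}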
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (revision 1460277)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (working copy)
@@ -381,6 +381,12 @@
/** The upper-half split region column qualifier */
public static final byte [] SPLITB_QUALIFIER = Bytes.toBytes("splitB");
+ /** The lower-half merge region column qualifier */
+ public static final byte[] MERGEA_QUALIFIER = Bytes.toBytes("mergeA");
+
+ /** The upper-half merge region column qualifier */
+ public static final byte[] MERGEB_QUALIFIER = Bytes.toBytes("mergeB");
+
/**
* The meta table version column qualifier.
* We keep current version of the meta table in this column in -ROOT-
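A lookup sketch (illustrative) using the new qualifiers: a Get against the
merged region's catalog row that retrieves the mergeA/mergeB cells, assuming
they are written to the catalog family the same way the split qualifiers are.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Get;

final class MergeQualifierGetExample {
  static Get mergeInfoGet(byte[] mergedRegionName) {
    Get get = new Get(mergedRegionName);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
    return get;
  }
}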
Index: hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java
===================================================================
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java (revision 1460277)
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterAdminProtos.java (working copy)
@@ -3404,6 +3404,1013 @@
// @@protoc_insertion_point(class_scope:MoveRegionResponse)
}
+ public interface DispatchMergingRegionsRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .RegionSpecifier regionA = 1;
+ boolean hasRegionA();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder();
+
+ // required .RegionSpecifier regionB = 2;
+ boolean hasRegionB();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder();
+
+ // optional bool forcible = 3 [default = false];
+ boolean hasForcible();
+ boolean getForcible();
+ }
+ public static final class DispatchMergingRegionsRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements DispatchMergingRegionsRequestOrBuilder {
+ // Use DispatchMergingRegionsRequest.newBuilder() to construct.
+ private DispatchMergingRegionsRequest(Builder builder) {
+ super(builder);
+ }
+ private DispatchMergingRegionsRequest(boolean noInit) {}
+
+ private static final DispatchMergingRegionsRequest defaultInstance;
+ public static DispatchMergingRegionsRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public DispatchMergingRegionsRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DispatchMergingRegionsRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DispatchMergingRegionsRequest_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required .RegionSpecifier regionA = 1;
+ public static final int REGIONA_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier regionA_;
+ public boolean hasRegionA() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA() {
+ return regionA_;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder() {
+ return regionA_;
+ }
+
+ // required .RegionSpecifier regionB = 2;
+ public static final int REGIONB_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier regionB_;
+ public boolean hasRegionB() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB() {
+ return regionB_;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder() {
+ return regionB_;
+ }
+
+ // optional bool forcible = 3 [default = false];
+ public static final int FORCIBLE_FIELD_NUMBER = 3;
+ private boolean forcible_;
+ public boolean hasForcible() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public boolean getForcible() {
+ return forcible_;
+ }
+
+ private void initFields() {
+ regionA_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ regionB_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ forcible_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasRegionA()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasRegionB()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getRegionA().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getRegionB().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, regionA_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, regionB_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBool(3, forcible_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, regionA_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, regionB_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(3, forcible_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest) obj;
+
+ boolean result = true;
+ result = result && (hasRegionA() == other.hasRegionA());
+ if (hasRegionA()) {
+ result = result && getRegionA()
+ .equals(other.getRegionA());
+ }
+ result = result && (hasRegionB() == other.hasRegionB());
+ if (hasRegionB()) {
+ result = result && getRegionB()
+ .equals(other.getRegionB());
+ }
+ result = result && (hasForcible() == other.hasForcible());
+ if (hasForcible()) {
+ result = result && (getForcible()
+ == other.getForcible());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasRegionA()) {
+ hash = (37 * hash) + REGIONA_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionA().hashCode();
+ }
+ if (hasRegionB()) {
+ hash = (37 * hash) + REGIONB_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionB().hashCode();
+ }
+ if (hasForcible()) {
+ hash = (37 * hash) + FORCIBLE_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getForcible());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DispatchMergingRegionsRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DispatchMergingRegionsRequest_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getRegionAFieldBuilder();
+ getRegionBFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (regionABuilder_ == null) {
+ regionA_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ } else {
+ regionABuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (regionBBuilder_ == null) {
+ regionB_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ } else {
+ regionBBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ forcible_ = false;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (regionABuilder_ == null) {
+ result.regionA_ = regionA_;
+ } else {
+ result.regionA_ = regionABuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (regionBBuilder_ == null) {
+ result.regionB_ = regionB_;
+ } else {
+ result.regionB_ = regionBBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.forcible_ = forcible_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest.getDefaultInstance()) return this;
+ if (other.hasRegionA()) {
+ mergeRegionA(other.getRegionA());
+ }
+ if (other.hasRegionB()) {
+ mergeRegionB(other.getRegionB());
+ }
+ if (other.hasForcible()) {
+ setForcible(other.getForcible());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasRegionA()) {
+
+ return false;
+ }
+ if (!hasRegionB()) {
+
+ return false;
+ }
+ if (!getRegionA().isInitialized()) {
+
+ return false;
+ }
+ if (!getRegionB().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder();
+ if (hasRegionA()) {
+ subBuilder.mergeFrom(getRegionA());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setRegionA(subBuilder.buildPartial());
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder();
+ if (hasRegionB()) {
+ subBuilder.mergeFrom(getRegionB());
+ }
+ input.readMessage(subBuilder, extensionRegistry);
+ setRegionB(subBuilder.buildPartial());
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ forcible_ = input.readBool();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required .RegionSpecifier regionA = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier regionA_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionABuilder_;
+ public boolean hasRegionA() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA() {
+ if (regionABuilder_ == null) {
+ return regionA_;
+ } else {
+ return regionABuilder_.getMessage();
+ }
+ }
+ public Builder setRegionA(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+ if (regionABuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ regionA_ = value;
+ onChanged();
+ } else {
+ regionABuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder setRegionA(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
+ if (regionABuilder_ == null) {
+ regionA_ = builderForValue.build();
+ onChanged();
+ } else {
+ regionABuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder mergeRegionA(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+ if (regionABuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ regionA_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
+ regionA_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(regionA_).mergeFrom(value).buildPartial();
+ } else {
+ regionA_ = value;
+ }
+ onChanged();
+ } else {
+ regionABuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ public Builder clearRegionA() {
+ if (regionABuilder_ == null) {
+ regionA_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ onChanged();
+ } else {
+ regionABuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionABuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getRegionAFieldBuilder().getBuilder();
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder() {
+ if (regionABuilder_ != null) {
+ return regionABuilder_.getMessageOrBuilder();
+ } else {
+ return regionA_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
+ getRegionAFieldBuilder() {
+ if (regionABuilder_ == null) {
+ regionABuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
+ regionA_,
+ getParentForChildren(),
+ isClean());
+ regionA_ = null;
+ }
+ return regionABuilder_;
+ }
+
+ // required .RegionSpecifier regionB = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier regionB_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBBuilder_;
+ public boolean hasRegionB() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB() {
+ if (regionBBuilder_ == null) {
+ return regionB_;
+ } else {
+ return regionBBuilder_.getMessage();
+ }
+ }
+ public Builder setRegionB(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+ if (regionBBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ regionB_ = value;
+ onChanged();
+ } else {
+ regionBBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ public Builder setRegionB(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
+ if (regionBBuilder_ == null) {
+ regionB_ = builderForValue.build();
+ onChanged();
+ } else {
+ regionBBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ public Builder mergeRegionB(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+ if (regionBBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ regionB_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
+ regionB_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(regionB_).mergeFrom(value).buildPartial();
+ } else {
+ regionB_ = value;
+ }
+ onChanged();
+ } else {
+ regionBBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ public Builder clearRegionB() {
+ if (regionBBuilder_ == null) {
+ regionB_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ onChanged();
+ } else {
+ regionBBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getRegionBFieldBuilder().getBuilder();
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder() {
+ if (regionBBuilder_ != null) {
+ return regionBBuilder_.getMessageOrBuilder();
+ } else {
+ return regionB_;
+ }
+ }
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
+ getRegionBFieldBuilder() {
+ if (regionBBuilder_ == null) {
+ regionBBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
+ regionB_,
+ getParentForChildren(),
+ isClean());
+ regionB_ = null;
+ }
+ return regionBBuilder_;
+ }
+
+ // optional bool forcible = 3 [default = false];
+ private boolean forcible_ ;
+ public boolean hasForcible() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ public boolean getForcible() {
+ return forcible_;
+ }
+ public Builder setForcible(boolean value) {
+ bitField0_ |= 0x00000004;
+ forcible_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearForcible() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ forcible_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:DispatchMergingRegionsRequest)
+ }
+
+ static {
+ defaultInstance = new DispatchMergingRegionsRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:DispatchMergingRegionsRequest)
+ }
+
+ public interface DispatchMergingRegionsResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+ }
+ public static final class DispatchMergingRegionsResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements DispatchMergingRegionsResponseOrBuilder {
+ // Use DispatchMergingRegionsResponse.newBuilder() to construct.
+ private DispatchMergingRegionsResponse(Builder builder) {
+ super(builder);
+ }
+ private DispatchMergingRegionsResponse(boolean noInit) {}
+
+ private static final DispatchMergingRegionsResponse defaultInstance;
+ public static DispatchMergingRegionsResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public DispatchMergingRegionsResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DispatchMergingRegionsResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DispatchMergingRegionsResponse_fieldAccessorTable;
+ }
+
+ private void initFields() {
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse) obj;
+
+ boolean result = true;
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DispatchMergingRegionsResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.internal_static_DispatchMergingRegionsResponse_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:DispatchMergingRegionsResponse)
+ }
+
+ static {
+ defaultInstance = new DispatchMergingRegionsResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:DispatchMergingRegionsResponse)
+ }
+
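For orientation, a minimal sketch of how a client would drive the new RPC defined above: build the two required RegionSpecifiers, assemble a DispatchMergingRegionsRequest, and issue it through the generated blocking stub. This is illustrative only, not part of the generated patch; it assumes the enclosing service is MasterAdminProtos.MasterAdminService and that a BlockingRpcChannel to the active master is already available (its construction is out of scope here).

import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.ByteString;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;

public class DispatchMergeSketch {
  // Asks the master to merge two regions, identified by their encoded names.
  // 'channel' is an assumed, already-established RPC channel to the master.
  static MasterAdminProtos.DispatchMergingRegionsResponse dispatchMerge(
      BlockingRpcChannel channel, byte[] encodedNameA, byte[] encodedNameB,
      boolean forcible) throws ServiceException {
    RegionSpecifier regionA = RegionSpecifier.newBuilder()
        .setType(RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
        .setValue(ByteString.copyFrom(encodedNameA))
        .build();
    RegionSpecifier regionB = RegionSpecifier.newBuilder()
        .setType(RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
        .setValue(ByteString.copyFrom(encodedNameB))
        .build();
    // regionA and regionB are required fields; forcible is optional and
    // defaults to false, matching the field comments in the builder above.
    MasterAdminProtos.DispatchMergingRegionsRequest request =
        MasterAdminProtos.DispatchMergingRegionsRequest.newBuilder()
            .setRegionA(regionA)
            .setRegionB(regionB)
            .setForcible(forcible)
            .build();
    // The blocking stub dispatches this as method index 4, as wired up in
    // the service plumbing below.
    return MasterAdminProtos.MasterAdminService.newBlockingStub(channel)
        .dispatchMergingRegions(null, request);
  }
}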
public interface AssignRegionRequestOrBuilder
extends com.google.protobuf.MessageOrBuilder {
@@ -19524,6 +20531,11 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse> done);
+ public abstract void dispatchMergingRegions(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse> done);
+
public abstract void assignRegion(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest request,
@@ -19672,6 +20684,14 @@
}
@java.lang.Override
+ public void dispatchMergingRegions(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse> done) {
+ impl.dispatchMergingRegions(controller, request, done);
+ }
+
+ @java.lang.Override
public void assignRegion(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest request,
@@ -19878,48 +20898,50 @@
case 3:
return impl.moveRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest)request);
case 4:
+ return impl.dispatchMergingRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest)request);
+ case 5:
return impl.assignRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest)request);
- case 5:
+ case 6:
return impl.unassignRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest)request);
- case 6:
+ case 7:
return impl.offlineRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest)request);
- case 7:
+ case 8:
return impl.deleteTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest)request);
- case 8:
+ case 9:
return impl.enableTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest)request);
- case 9:
+ case 10:
return impl.disableTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest)request);
- case 10:
+ case 11:
return impl.modifyTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest)request);
- case 11:
+ case 12:
return impl.createTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest)request);
- case 12:
+ case 13:
return impl.shutdown(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest)request);
- case 13:
+ case 14:
return impl.stopMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest)request);
- case 14:
+ case 15:
return impl.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest)request);
- case 15:
+ case 16:
return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest)request);
- case 16:
+ case 17:
return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest)request);
- case 17:
+ case 18:
return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest)request);
- case 18:
+ case 19:
return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest)request);
- case 19:
+ case 20:
return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request);
- case 20:
+ case 21:
return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest)request);
- case 21:
+ case 22:
return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest)request);
- case 22:
+ case 23:
return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest)request);
- case 23:
+ case 24:
return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest)request);
- case 24:
+ case 25:
return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest)request);
- case 25:
+ case 26:
return impl.isRestoreSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -19944,48 +20966,50 @@
case 3:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest.getDefaultInstance();
case 4:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest.getDefaultInstance();
+ case 5:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest.getDefaultInstance();
- case 5:
+ case 6:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest.getDefaultInstance();
- case 6:
+ case 7:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest.getDefaultInstance();
- case 7:
+ case 8:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest.getDefaultInstance();
- case 8:
+ case 9:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest.getDefaultInstance();
- case 9:
+ case 10:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest.getDefaultInstance();
- case 10:
+ case 11:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest.getDefaultInstance();
- case 11:
+ case 12:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest.getDefaultInstance();
- case 12:
+ case 13:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest.getDefaultInstance();
- case 13:
+ case 14:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest.getDefaultInstance();
- case 14:
+ case 15:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest.getDefaultInstance();
- case 15:
+ case 16:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest.getDefaultInstance();
- case 16:
+ case 17:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.getDefaultInstance();
- case 17:
+ case 18:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.getDefaultInstance();
- case 18:
+ case 19:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
- case 19:
+ case 20:
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance();
- case 20:
+ case 21:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest.getDefaultInstance();
- case 21:
+ case 22:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest.getDefaultInstance();
- case 22:
+ case 23:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest.getDefaultInstance();
- case 23:
+ case 24:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest.getDefaultInstance();
- case 24:
+ case 25:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest.getDefaultInstance();
- case 25:
+ case 26:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -20010,48 +21034,50 @@
case 3:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse.getDefaultInstance();
case 4:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse.getDefaultInstance();
+ case 5:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse.getDefaultInstance();
- case 5:
+ case 6:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse.getDefaultInstance();
- case 6:
+ case 7:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse.getDefaultInstance();
- case 7:
+ case 8:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse.getDefaultInstance();
- case 8:
+ case 9:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse.getDefaultInstance();
- case 9:
+ case 10:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse.getDefaultInstance();
- case 10:
+ case 11:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.getDefaultInstance();
- case 11:
+ case 12:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse.getDefaultInstance();
- case 12:
+ case 13:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse.getDefaultInstance();
- case 13:
+ case 14:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse.getDefaultInstance();
- case 14:
+ case 15:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.getDefaultInstance();
- case 15:
+ case 16:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.getDefaultInstance();
- case 16:
+ case 17:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance();
- case 17:
+ case 18:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance();
- case 18:
+ case 19:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
- case 19:
+ case 20:
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance();
- case 20:
+ case 21:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.getDefaultInstance();
- case 21:
+ case 22:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.getDefaultInstance();
- case 22:
+ case 23:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.getDefaultInstance();
- case 23:
+ case 24:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.getDefaultInstance();
- case 24:
+ case 25:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse.getDefaultInstance();
- case 25:
+ case 26:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -20081,6 +21107,11 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse> done);
+ public abstract void dispatchMergingRegions(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse> done);
+
public abstract void assignRegion(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest request,
@@ -20234,111 +21265,116 @@
done));
return;
case 4:
+ this.dispatchMergingRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse>specializeCallback(
+ done));
+ return;
+ case 5:
this.assignRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse>specializeCallback(
done));
return;
- case 5:
+ case 6:
this.unassignRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse>specializeCallback(
done));
return;
- case 6:
+ case 7:
this.offlineRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse>specializeCallback(
done));
return;
- case 7:
+ case 8:
this.deleteTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse>specializeCallback(
done));
return;
- case 8:
+ case 9:
this.enableTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse>specializeCallback(
done));
return;
- case 9:
+ case 10:
this.disableTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse>specializeCallback(
done));
return;
- case 10:
+ case 11:
this.modifyTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse>specializeCallback(
done));
return;
- case 11:
+ case 12:
this.createTable(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse>specializeCallback(
done));
return;
- case 12:
+ case 13:
this.shutdown(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse>specializeCallback(
done));
return;
- case 13:
+ case 14:
this.stopMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse>specializeCallback(
done));
return;
- case 14:
+ case 15:
this.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse>specializeCallback(
done));
return;
- case 15:
+ case 16:
this.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse>specializeCallback(
done));
return;
- case 16:
+ case 17:
this.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse>specializeCallback(
done));
return;
- case 17:
+ case 18:
this.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse>specializeCallback(
done));
return;
- case 18:
+ case 19:
this.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse>specializeCallback(
done));
return;
- case 19:
+ case 20:
this.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse>specializeCallback(
done));
return;
- case 20:
+ case 21:
this.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse>specializeCallback(
done));
return;
- case 21:
+ case 22:
this.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse>specializeCallback(
done));
return;
- case 22:
+ case 23:
this.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse>specializeCallback(
done));
return;
- case 23:
+ case 24:
this.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse>specializeCallback(
done));
return;
- case 24:
+ case 25:
this.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse>specializeCallback(
done));
return;
- case 25:
+ case 26:
this.isRestoreSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse>specializeCallback(
done));
@@ -20366,48 +21402,50 @@
case 3:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest.getDefaultInstance();
case 4:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest.getDefaultInstance();
+ case 5:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest.getDefaultInstance();
- case 5:
+ case 6:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest.getDefaultInstance();
- case 6:
+ case 7:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest.getDefaultInstance();
- case 7:
+ case 8:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest.getDefaultInstance();
- case 8:
+ case 9:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest.getDefaultInstance();
- case 9:
+ case 10:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest.getDefaultInstance();
- case 10:
+ case 11:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest.getDefaultInstance();
- case 11:
+ case 12:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest.getDefaultInstance();
- case 12:
+ case 13:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest.getDefaultInstance();
- case 13:
+ case 14:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest.getDefaultInstance();
- case 14:
+ case 15:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest.getDefaultInstance();
- case 15:
+ case 16:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest.getDefaultInstance();
- case 16:
+ case 17:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.getDefaultInstance();
- case 17:
+ case 18:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.getDefaultInstance();
- case 18:
+ case 19:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
- case 19:
+ case 20:
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance();
- case 20:
+ case 21:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest.getDefaultInstance();
- case 21:
+ case 22:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest.getDefaultInstance();
- case 22:
+ case 23:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest.getDefaultInstance();
- case 23:
+ case 24:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest.getDefaultInstance();
- case 24:
+ case 25:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest.getDefaultInstance();
- case 25:
+ case 26:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -20432,48 +21470,50 @@
case 3:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse.getDefaultInstance();
case 4:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse.getDefaultInstance();
+ case 5:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse.getDefaultInstance();
- case 5:
+ case 6:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse.getDefaultInstance();
- case 6:
+ case 7:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse.getDefaultInstance();
- case 7:
+ case 8:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse.getDefaultInstance();
- case 8:
+ case 9:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse.getDefaultInstance();
- case 9:
+ case 10:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse.getDefaultInstance();
- case 10:
+ case 11:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.getDefaultInstance();
- case 11:
+ case 12:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse.getDefaultInstance();
- case 12:
+ case 13:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse.getDefaultInstance();
- case 13:
+ case 14:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse.getDefaultInstance();
- case 14:
+ case 15:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.getDefaultInstance();
- case 15:
+ case 16:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.getDefaultInstance();
- case 16:
+ case 17:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance();
- case 17:
+ case 18:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance();
- case 18:
+ case 19:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
- case 19:
+ case 20:
return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance();
- case 20:
+ case 21:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.getDefaultInstance();
- case 21:
+ case 22:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.getDefaultInstance();
- case 22:
+ case 23:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.getDefaultInstance();
- case 23:
+ case 24:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.getDefaultInstance();
- case 24:
+ case 25:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse.getDefaultInstance();
- case 25:
+ case 26:
return org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -20556,12 +21596,27 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse.getDefaultInstance()));
}
+ public void dispatchMergingRegions(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(4),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse.getDefaultInstance()));
+ }
+
public void assignRegion(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(4),
+ getDescriptor().getMethods().get(5),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse.getDefaultInstance(),
@@ -20576,7 +21631,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(5),
+ getDescriptor().getMethods().get(6),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse.getDefaultInstance(),
@@ -20591,7 +21646,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(6),
+ getDescriptor().getMethods().get(7),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse.getDefaultInstance(),
@@ -20606,7 +21661,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(7),
+ getDescriptor().getMethods().get(8),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse.getDefaultInstance(),
@@ -20621,7 +21676,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(8),
+ getDescriptor().getMethods().get(9),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse.getDefaultInstance(),
@@ -20636,7 +21691,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(9),
+ getDescriptor().getMethods().get(10),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse.getDefaultInstance(),
@@ -20651,7 +21706,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(10),
+ getDescriptor().getMethods().get(11),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.getDefaultInstance(),
@@ -20666,7 +21721,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(11),
+ getDescriptor().getMethods().get(12),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse.getDefaultInstance(),
@@ -20681,7 +21736,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(12),
+ getDescriptor().getMethods().get(13),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse.getDefaultInstance(),
@@ -20696,7 +21751,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(13),
+ getDescriptor().getMethods().get(14),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse.getDefaultInstance(),
@@ -20711,7 +21766,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(14),
+ getDescriptor().getMethods().get(15),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.getDefaultInstance(),
@@ -20726,7 +21781,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(15),
+ getDescriptor().getMethods().get(16),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.getDefaultInstance(),
@@ -20741,7 +21796,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(16),
+ getDescriptor().getMethods().get(17),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance(),
@@ -20756,7 +21811,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(17),
+ getDescriptor().getMethods().get(18),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance(),
@@ -20771,7 +21826,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(18),
+ getDescriptor().getMethods().get(19),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(),
@@ -20786,7 +21841,7 @@
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(19),
+ getDescriptor().getMethods().get(20),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(),
@@ -20801,7 +21856,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(20),
+ getDescriptor().getMethods().get(21),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.getDefaultInstance(),
@@ -20816,7 +21871,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(21),
+ getDescriptor().getMethods().get(22),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.getDefaultInstance(),
@@ -20831,7 +21886,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(22),
+ getDescriptor().getMethods().get(23),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.getDefaultInstance(),
@@ -20846,7 +21901,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(23),
+ getDescriptor().getMethods().get(24),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.getDefaultInstance(),
@@ -20861,7 +21916,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(24),
+ getDescriptor().getMethods().get(25),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse.getDefaultInstance(),
@@ -20876,7 +21931,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(25),
+ getDescriptor().getMethods().get(26),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance(),
@@ -20913,6 +21968,11 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest request)
throws com.google.protobuf.ServiceException;
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse dispatchMergingRegions(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest request)
+ throws com.google.protobuf.ServiceException;
+
public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse assignRegion(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest request)
@@ -21079,12 +22139,24 @@
}
+ public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse dispatchMergingRegions(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(4),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse.getDefaultInstance());
+ }
+
+
public org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse assignRegion(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(4),
+ getDescriptor().getMethods().get(5),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse.getDefaultInstance());
@@ -21096,7 +22168,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(5),
+ getDescriptor().getMethods().get(6),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse.getDefaultInstance());
@@ -21108,7 +22180,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(6),
+ getDescriptor().getMethods().get(7),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse.getDefaultInstance());
@@ -21120,7 +22192,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(7),
+ getDescriptor().getMethods().get(8),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse.getDefaultInstance());
@@ -21132,7 +22204,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(8),
+ getDescriptor().getMethods().get(9),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse.getDefaultInstance());
@@ -21144,7 +22216,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(9),
+ getDescriptor().getMethods().get(10),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse.getDefaultInstance());
@@ -21156,7 +22228,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(10),
+ getDescriptor().getMethods().get(11),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.getDefaultInstance());
@@ -21168,7 +22240,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(11),
+ getDescriptor().getMethods().get(12),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse.getDefaultInstance());
@@ -21180,7 +22252,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(12),
+ getDescriptor().getMethods().get(13),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse.getDefaultInstance());
@@ -21192,7 +22264,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(13),
+ getDescriptor().getMethods().get(14),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse.getDefaultInstance());
@@ -21204,7 +22276,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(14),
+ getDescriptor().getMethods().get(15),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.getDefaultInstance());
@@ -21216,7 +22288,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(15),
+ getDescriptor().getMethods().get(16),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.getDefaultInstance());
@@ -21228,7 +22300,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(16),
+ getDescriptor().getMethods().get(17),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.getDefaultInstance());
@@ -21240,7 +22312,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(17),
+ getDescriptor().getMethods().get(18),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.getDefaultInstance());
@@ -21252,7 +22324,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(18),
+ getDescriptor().getMethods().get(19),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance());
@@ -21264,7 +22336,7 @@
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(19),
+ getDescriptor().getMethods().get(20),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance());
@@ -21276,7 +22348,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(20),
+ getDescriptor().getMethods().get(21),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.getDefaultInstance());
@@ -21288,7 +22360,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(21),
+ getDescriptor().getMethods().get(22),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.getDefaultInstance());
@@ -21300,7 +22372,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(22),
+ getDescriptor().getMethods().get(23),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.getDefaultInstance());
@@ -21312,7 +22384,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(23),
+ getDescriptor().getMethods().get(24),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.getDefaultInstance());
@@ -21324,7 +22396,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(24),
+ getDescriptor().getMethods().get(25),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse.getDefaultInstance());
@@ -21336,7 +22408,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(25),
+ getDescriptor().getMethods().get(26),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance());
@@ -21386,6 +22458,16 @@
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_MoveRegionResponse_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_DispatchMergingRegionsRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_DispatchMergingRegionsRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_DispatchMergingRegionsResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_DispatchMergingRegionsResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_AssignRegionRequest_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -21615,97 +22697,103 @@
"hema\"\026\n\024ModifyColumnResponse\"Z\n\021MoveRegi" +
"onRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecif",
"ier\022#\n\016destServerName\030\002 \001(\0132\013.ServerName" +
- "\"\024\n\022MoveRegionResponse\"7\n\023AssignRegionRe" +
- "quest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\"" +
- "\026\n\024AssignRegionResponse\"O\n\025UnassignRegio" +
- "nRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifi" +
- "er\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026UnassignRegi" +
- "onResponse\"8\n\024OfflineRegionRequest\022 \n\006re" +
- "gion\030\001 \002(\0132\020.RegionSpecifier\"\027\n\025OfflineR" +
- "egionResponse\"J\n\022CreateTableRequest\022!\n\013t" +
- "ableSchema\030\001 \002(\0132\014.TableSchema\022\021\n\tsplitK",
- "eys\030\002 \003(\014\"\025\n\023CreateTableResponse\"\'\n\022Dele" +
- "teTableRequest\022\021\n\ttableName\030\001 \002(\014\"\025\n\023Del" +
- "eteTableResponse\"\'\n\022EnableTableRequest\022\021" +
- "\n\ttableName\030\001 \002(\014\"\025\n\023EnableTableResponse" +
- "\"(\n\023DisableTableRequest\022\021\n\ttableName\030\001 \002" +
- "(\014\"\026\n\024DisableTableResponse\"J\n\022ModifyTabl" +
- "eRequest\022\021\n\ttableName\030\001 \002(\014\022!\n\013tableSche" +
- "ma\030\002 \002(\0132\014.TableSchema\"\025\n\023ModifyTableRes" +
- "ponse\"\021\n\017ShutdownRequest\"\022\n\020ShutdownResp" +
- "onse\"\023\n\021StopMasterRequest\"\024\n\022StopMasterR",
- "esponse\"\020\n\016BalanceRequest\"&\n\017BalanceResp" +
- "onse\022\023\n\013balancerRan\030\001 \002(\010\"<\n\031SetBalancer" +
- "RunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronou" +
- "s\030\002 \001(\010\"6\n\032SetBalancerRunningResponse\022\030\n" +
- "\020prevBalanceValue\030\001 \001(\010\"\024\n\022CatalogScanRe" +
- "quest\")\n\023CatalogScanResponse\022\022\n\nscanResu" +
- "lt\030\001 \001(\005\"-\n\033EnableCatalogJanitorRequest\022" +
- "\016\n\006enable\030\001 \002(\010\"1\n\034EnableCatalogJanitorR" +
- "esponse\022\021\n\tprevValue\030\001 \001(\010\" \n\036IsCatalogJ" +
- "anitorEnabledRequest\"0\n\037IsCatalogJanitor",
- "EnabledResponse\022\r\n\005value\030\001 \002(\010\"=\n\023TakeSn" +
- "apshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.Snapsh" +
- "otDescription\"/\n\024TakeSnapshotResponse\022\027\n" +
- "\017expectedTimeout\030\001 \002(\003\"\025\n\023ListSnapshotRe" +
- "quest\"?\n\024ListSnapshotResponse\022\'\n\tsnapsho" +
- "ts\030\001 \003(\0132\024.SnapshotDescription\"?\n\025Delete" +
+ "\"\024\n\022MoveRegionResponse\"~\n\035DispatchMergin" +
+ "gRegionsRequest\022!\n\007regionA\030\001 \002(\0132\020.Regio" +
+ "nSpecifier\022!\n\007regionB\030\002 \002(\0132\020.RegionSpec" +
+ "ifier\022\027\n\010forcible\030\003 \001(\010:\005false\" \n\036Dispat" +
+ "chMergingRegionsResponse\"7\n\023AssignRegion" +
+ "Request\022 \n\006region\030\001 \002(\0132\020.RegionSpecifie" +
+ "r\"\026\n\024AssignRegionResponse\"O\n\025UnassignReg" +
+ "ionRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpeci" +
+ "fier\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026UnassignRe",
+ "gionResponse\"8\n\024OfflineRegionRequest\022 \n\006" +
+ "region\030\001 \002(\0132\020.RegionSpecifier\"\027\n\025Offlin" +
+ "eRegionResponse\"J\n\022CreateTableRequest\022!\n" +
+ "\013tableSchema\030\001 \002(\0132\014.TableSchema\022\021\n\tspli" +
+ "tKeys\030\002 \003(\014\"\025\n\023CreateTableResponse\"\'\n\022De" +
+ "leteTableRequest\022\021\n\ttableName\030\001 \002(\014\"\025\n\023D" +
+ "eleteTableResponse\"\'\n\022EnableTableRequest" +
+ "\022\021\n\ttableName\030\001 \002(\014\"\025\n\023EnableTableRespon" +
+ "se\"(\n\023DisableTableRequest\022\021\n\ttableName\030\001" +
+ " \002(\014\"\026\n\024DisableTableResponse\"J\n\022ModifyTa",
+ "bleRequest\022\021\n\ttableName\030\001 \002(\014\022!\n\013tableSc" +
+ "hema\030\002 \002(\0132\014.TableSchema\"\025\n\023ModifyTableR" +
+ "esponse\"\021\n\017ShutdownRequest\"\022\n\020ShutdownRe" +
+ "sponse\"\023\n\021StopMasterRequest\"\024\n\022StopMaste" +
+ "rResponse\"\020\n\016BalanceRequest\"&\n\017BalanceRe" +
+ "sponse\022\023\n\013balancerRan\030\001 \002(\010\"<\n\031SetBalanc" +
+ "erRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchron" +
+ "ous\030\002 \001(\010\"6\n\032SetBalancerRunningResponse\022" +
+ "\030\n\020prevBalanceValue\030\001 \001(\010\"\024\n\022CatalogScan" +
+ "Request\")\n\023CatalogScanResponse\022\022\n\nscanRe",
+ "sult\030\001 \001(\005\"-\n\033EnableCatalogJanitorReques" +
+ "t\022\016\n\006enable\030\001 \002(\010\"1\n\034EnableCatalogJanito" +
+ "rResponse\022\021\n\tprevValue\030\001 \001(\010\" \n\036IsCatalo" +
+ "gJanitorEnabledRequest\"0\n\037IsCatalogJanit" +
+ "orEnabledResponse\022\r\n\005value\030\001 \002(\010\"=\n\023Take" +
"SnapshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.Snap" +
- "shotDescription\"\030\n\026DeleteSnapshotRespons" +
- "e\"@\n\026RestoreSnapshotRequest\022&\n\010snapshot\030" +
- "\001 \002(\0132\024.SnapshotDescription\"\031\n\027RestoreSn",
- "apshotResponse\"?\n\025IsSnapshotDoneRequest\022" +
- "&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescription\"" +
- "U\n\026IsSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:" +
- "\005false\022&\n\010snapshot\030\002 \001(\0132\024.SnapshotDescr" +
- "iption\"F\n\034IsRestoreSnapshotDoneRequest\022&" +
- "\n\010snapshot\030\001 \001(\0132\024.SnapshotDescription\"3" +
- "\n\035IsRestoreSnapshotDoneResponse\022\022\n\004done\030" +
- "\001 \001(\010:\004true2\244\r\n\022MasterAdminService\0222\n\tad" +
- "dColumn\022\021.AddColumnRequest\032\022.AddColumnRe" +
- "sponse\022;\n\014deleteColumn\022\024.DeleteColumnReq",
- "uest\032\025.DeleteColumnResponse\022;\n\014modifyCol" +
- "umn\022\024.ModifyColumnRequest\032\025.ModifyColumn" +
- "Response\0225\n\nmoveRegion\022\022.MoveRegionReque" +
- "st\032\023.MoveRegionResponse\022;\n\014assignRegion\022" +
- "\024.AssignRegionRequest\032\025.AssignRegionResp" +
- "onse\022A\n\016unassignRegion\022\026.UnassignRegionR" +
- "equest\032\027.UnassignRegionResponse\022>\n\roffli" +
- "neRegion\022\025.OfflineRegionRequest\032\026.Offlin" +
- "eRegionResponse\0228\n\013deleteTable\022\023.DeleteT" +
- "ableRequest\032\024.DeleteTableResponse\0228\n\013ena",
- "bleTable\022\023.EnableTableRequest\032\024.EnableTa" +
- "bleResponse\022;\n\014disableTable\022\024.DisableTab" +
- "leRequest\032\025.DisableTableResponse\0228\n\013modi" +
- "fyTable\022\023.ModifyTableRequest\032\024.ModifyTab" +
- "leResponse\0228\n\013createTable\022\023.CreateTableR" +
- "equest\032\024.CreateTableResponse\022/\n\010shutdown" +
- "\022\020.ShutdownRequest\032\021.ShutdownResponse\0225\n" +
- "\nstopMaster\022\022.StopMasterRequest\032\023.StopMa" +
- "sterResponse\022,\n\007balance\022\017.BalanceRequest" +
- "\032\020.BalanceResponse\022M\n\022setBalancerRunning",
- "\022\032.SetBalancerRunningRequest\032\033.SetBalanc" +
- "erRunningResponse\022;\n\016runCatalogScan\022\023.Ca" +
- "talogScanRequest\032\024.CatalogScanResponse\022S" +
- "\n\024enableCatalogJanitor\022\034.EnableCatalogJa" +
- "nitorRequest\032\035.EnableCatalogJanitorRespo" +
- "nse\022\\\n\027isCatalogJanitorEnabled\022\037.IsCatal" +
- "ogJanitorEnabledRequest\032 .IsCatalogJanit" +
- "orEnabledResponse\022L\n\021execMasterService\022\032" +
- ".CoprocessorServiceRequest\032\033.Coprocessor" +
- "ServiceResponse\0227\n\010snapshot\022\024.TakeSnapsh",
- "otRequest\032\025.TakeSnapshotResponse\022D\n\025getC" +
- "ompletedSnapshots\022\024.ListSnapshotRequest\032" +
- "\025.ListSnapshotResponse\022A\n\016deleteSnapshot" +
- "\022\026.DeleteSnapshotRequest\032\027.DeleteSnapsho" +
- "tResponse\022A\n\016isSnapshotDone\022\026.IsSnapshot" +
- "DoneRequest\032\027.IsSnapshotDoneResponse\022D\n\017" +
- "restoreSnapshot\022\027.RestoreSnapshotRequest" +
- "\032\030.RestoreSnapshotResponse\022V\n\025isRestoreS" +
- "napshotDone\022\035.IsRestoreSnapshotDoneReque" +
- "st\032\036.IsRestoreSnapshotDoneResponseBG\n*or",
- "g.apache.hadoop.hbase.protobuf.generated" +
- "B\021MasterAdminProtosH\001\210\001\001\240\001\001"
+ "shotDescription\"/\n\024TakeSnapshotResponse\022" +
+ "\027\n\017expectedTimeout\030\001 \002(\003\"\025\n\023ListSnapshot" +
+ "Request\"?\n\024ListSnapshotResponse\022\'\n\tsnaps" +
+ "hots\030\001 \003(\0132\024.SnapshotDescription\"?\n\025Dele",
+ "teSnapshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.Sn" +
+ "apshotDescription\"\030\n\026DeleteSnapshotRespo" +
+ "nse\"@\n\026RestoreSnapshotRequest\022&\n\010snapsho" +
+ "t\030\001 \002(\0132\024.SnapshotDescription\"\031\n\027Restore" +
+ "SnapshotResponse\"?\n\025IsSnapshotDoneReques" +
+ "t\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescriptio" +
+ "n\"U\n\026IsSnapshotDoneResponse\022\023\n\004done\030\001 \001(" +
+ "\010:\005false\022&\n\010snapshot\030\002 \001(\0132\024.SnapshotDes" +
+ "cription\"F\n\034IsRestoreSnapshotDoneRequest" +
+ "\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescription",
+ "\"3\n\035IsRestoreSnapshotDoneResponse\022\022\n\004don" +
+ "e\030\001 \001(\010:\004true2\377\r\n\022MasterAdminService\0222\n\t" +
+ "addColumn\022\021.AddColumnRequest\032\022.AddColumn" +
+ "Response\022;\n\014deleteColumn\022\024.DeleteColumnR" +
+ "equest\032\025.DeleteColumnResponse\022;\n\014modifyC" +
+ "olumn\022\024.ModifyColumnRequest\032\025.ModifyColu" +
+ "mnResponse\0225\n\nmoveRegion\022\022.MoveRegionReq" +
+ "uest\032\023.MoveRegionResponse\022Y\n\026dispatchMer" +
+ "gingRegions\022\036.DispatchMergingRegionsRequ" +
+ "est\032\037.DispatchMergingRegionsResponse\022;\n\014",
+ "assignRegion\022\024.AssignRegionRequest\032\025.Ass" +
+ "ignRegionResponse\022A\n\016unassignRegion\022\026.Un" +
+ "assignRegionRequest\032\027.UnassignRegionResp" +
+ "onse\022>\n\rofflineRegion\022\025.OfflineRegionReq" +
+ "uest\032\026.OfflineRegionResponse\0228\n\013deleteTa" +
+ "ble\022\023.DeleteTableRequest\032\024.DeleteTableRe" +
+ "sponse\0228\n\013enableTable\022\023.EnableTableReque" +
+ "st\032\024.EnableTableResponse\022;\n\014disableTable" +
+ "\022\024.DisableTableRequest\032\025.DisableTableRes" +
+ "ponse\0228\n\013modifyTable\022\023.ModifyTableReques",
+ "t\032\024.ModifyTableResponse\0228\n\013createTable\022\023" +
+ ".CreateTableRequest\032\024.CreateTableRespons" +
+ "e\022/\n\010shutdown\022\020.ShutdownRequest\032\021.Shutdo" +
+ "wnResponse\0225\n\nstopMaster\022\022.StopMasterReq" +
+ "uest\032\023.StopMasterResponse\022,\n\007balance\022\017.B" +
+ "alanceRequest\032\020.BalanceResponse\022M\n\022setBa" +
+ "lancerRunning\022\032.SetBalancerRunningReques" +
+ "t\032\033.SetBalancerRunningResponse\022;\n\016runCat" +
+ "alogScan\022\023.CatalogScanRequest\032\024.CatalogS" +
+ "canResponse\022S\n\024enableCatalogJanitor\022\034.En",
+ "ableCatalogJanitorRequest\032\035.EnableCatalo" +
+ "gJanitorResponse\022\\\n\027isCatalogJanitorEnab" +
+ "led\022\037.IsCatalogJanitorEnabledRequest\032 .I" +
+ "sCatalogJanitorEnabledResponse\022L\n\021execMa" +
+ "sterService\022\032.CoprocessorServiceRequest\032" +
+ "\033.CoprocessorServiceResponse\0227\n\010snapshot" +
+ "\022\024.TakeSnapshotRequest\032\025.TakeSnapshotRes" +
+ "ponse\022D\n\025getCompletedSnapshots\022\024.ListSna" +
+ "pshotRequest\032\025.ListSnapshotResponse\022A\n\016d" +
+ "eleteSnapshot\022\026.DeleteSnapshotRequest\032\027.",
+ "DeleteSnapshotResponse\022A\n\016isSnapshotDone" +
+ "\022\026.IsSnapshotDoneRequest\032\027.IsSnapshotDon" +
+ "eResponse\022D\n\017restoreSnapshot\022\027.RestoreSn" +
+ "apshotRequest\032\030.RestoreSnapshotResponse\022" +
+ "V\n\025isRestoreSnapshotDone\022\035.IsRestoreSnap" +
+ "shotDoneRequest\032\036.IsRestoreSnapshotDoneR" +
+ "esponseBG\n*org.apache.hadoop.hbase.proto" +
+ "buf.generatedB\021MasterAdminProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -21776,8 +22864,24 @@
new java.lang.String[] { },
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse.Builder.class);
+ internal_static_DispatchMergingRegionsRequest_descriptor =
+ getDescriptor().getMessageTypes().get(8);
+ internal_static_DispatchMergingRegionsRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_DispatchMergingRegionsRequest_descriptor,
+ new java.lang.String[] { "RegionA", "RegionB", "Forcible", },
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest.Builder.class);
+ internal_static_DispatchMergingRegionsResponse_descriptor =
+ getDescriptor().getMessageTypes().get(9);
+ internal_static_DispatchMergingRegionsResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_DispatchMergingRegionsResponse_descriptor,
+ new java.lang.String[] { },
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse.Builder.class);
internal_static_AssignRegionRequest_descriptor =
- getDescriptor().getMessageTypes().get(8);
+ getDescriptor().getMessageTypes().get(10);
internal_static_AssignRegionRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_AssignRegionRequest_descriptor,
@@ -21785,7 +22889,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest.Builder.class);
internal_static_AssignRegionResponse_descriptor =
- getDescriptor().getMessageTypes().get(9);
+ getDescriptor().getMessageTypes().get(11);
internal_static_AssignRegionResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_AssignRegionResponse_descriptor,
@@ -21793,7 +22897,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse.Builder.class);
internal_static_UnassignRegionRequest_descriptor =
- getDescriptor().getMessageTypes().get(10);
+ getDescriptor().getMessageTypes().get(12);
internal_static_UnassignRegionRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_UnassignRegionRequest_descriptor,
@@ -21801,7 +22905,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest.Builder.class);
internal_static_UnassignRegionResponse_descriptor =
- getDescriptor().getMessageTypes().get(11);
+ getDescriptor().getMessageTypes().get(13);
internal_static_UnassignRegionResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_UnassignRegionResponse_descriptor,
@@ -21809,7 +22913,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse.Builder.class);
internal_static_OfflineRegionRequest_descriptor =
- getDescriptor().getMessageTypes().get(12);
+ getDescriptor().getMessageTypes().get(14);
internal_static_OfflineRegionRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_OfflineRegionRequest_descriptor,
@@ -21817,7 +22921,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest.Builder.class);
internal_static_OfflineRegionResponse_descriptor =
- getDescriptor().getMessageTypes().get(13);
+ getDescriptor().getMessageTypes().get(15);
internal_static_OfflineRegionResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_OfflineRegionResponse_descriptor,
@@ -21825,7 +22929,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse.Builder.class);
internal_static_CreateTableRequest_descriptor =
- getDescriptor().getMessageTypes().get(14);
+ getDescriptor().getMessageTypes().get(16);
internal_static_CreateTableRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_CreateTableRequest_descriptor,
@@ -21833,7 +22937,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest.Builder.class);
internal_static_CreateTableResponse_descriptor =
- getDescriptor().getMessageTypes().get(15);
+ getDescriptor().getMessageTypes().get(17);
internal_static_CreateTableResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_CreateTableResponse_descriptor,
@@ -21841,7 +22945,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse.Builder.class);
internal_static_DeleteTableRequest_descriptor =
- getDescriptor().getMessageTypes().get(16);
+ getDescriptor().getMessageTypes().get(18);
internal_static_DeleteTableRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DeleteTableRequest_descriptor,
@@ -21849,7 +22953,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest.Builder.class);
internal_static_DeleteTableResponse_descriptor =
- getDescriptor().getMessageTypes().get(17);
+ getDescriptor().getMessageTypes().get(19);
internal_static_DeleteTableResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DeleteTableResponse_descriptor,
@@ -21857,7 +22961,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse.Builder.class);
internal_static_EnableTableRequest_descriptor =
- getDescriptor().getMessageTypes().get(18);
+ getDescriptor().getMessageTypes().get(20);
internal_static_EnableTableRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_EnableTableRequest_descriptor,
@@ -21865,7 +22969,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest.Builder.class);
internal_static_EnableTableResponse_descriptor =
- getDescriptor().getMessageTypes().get(19);
+ getDescriptor().getMessageTypes().get(21);
internal_static_EnableTableResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_EnableTableResponse_descriptor,
@@ -21873,7 +22977,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse.Builder.class);
internal_static_DisableTableRequest_descriptor =
- getDescriptor().getMessageTypes().get(20);
+ getDescriptor().getMessageTypes().get(22);
internal_static_DisableTableRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DisableTableRequest_descriptor,
@@ -21881,7 +22985,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest.Builder.class);
internal_static_DisableTableResponse_descriptor =
- getDescriptor().getMessageTypes().get(21);
+ getDescriptor().getMessageTypes().get(23);
internal_static_DisableTableResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DisableTableResponse_descriptor,
@@ -21889,7 +22993,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse.Builder.class);
internal_static_ModifyTableRequest_descriptor =
- getDescriptor().getMessageTypes().get(22);
+ getDescriptor().getMessageTypes().get(24);
internal_static_ModifyTableRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ModifyTableRequest_descriptor,
@@ -21897,7 +23001,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest.Builder.class);
internal_static_ModifyTableResponse_descriptor =
- getDescriptor().getMessageTypes().get(23);
+ getDescriptor().getMessageTypes().get(25);
internal_static_ModifyTableResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ModifyTableResponse_descriptor,
@@ -21905,7 +23009,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse.Builder.class);
internal_static_ShutdownRequest_descriptor =
- getDescriptor().getMessageTypes().get(24);
+ getDescriptor().getMessageTypes().get(26);
internal_static_ShutdownRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ShutdownRequest_descriptor,
@@ -21913,7 +23017,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest.Builder.class);
internal_static_ShutdownResponse_descriptor =
- getDescriptor().getMessageTypes().get(25);
+ getDescriptor().getMessageTypes().get(27);
internal_static_ShutdownResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ShutdownResponse_descriptor,
@@ -21921,7 +23025,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse.Builder.class);
internal_static_StopMasterRequest_descriptor =
- getDescriptor().getMessageTypes().get(26);
+ getDescriptor().getMessageTypes().get(28);
internal_static_StopMasterRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_StopMasterRequest_descriptor,
@@ -21929,7 +23033,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest.Builder.class);
internal_static_StopMasterResponse_descriptor =
- getDescriptor().getMessageTypes().get(27);
+ getDescriptor().getMessageTypes().get(29);
internal_static_StopMasterResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_StopMasterResponse_descriptor,
@@ -21937,7 +23041,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse.Builder.class);
internal_static_BalanceRequest_descriptor =
- getDescriptor().getMessageTypes().get(28);
+ getDescriptor().getMessageTypes().get(30);
internal_static_BalanceRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_BalanceRequest_descriptor,
@@ -21945,7 +23049,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest.Builder.class);
internal_static_BalanceResponse_descriptor =
- getDescriptor().getMessageTypes().get(29);
+ getDescriptor().getMessageTypes().get(31);
internal_static_BalanceResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_BalanceResponse_descriptor,
@@ -21953,7 +23057,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse.Builder.class);
internal_static_SetBalancerRunningRequest_descriptor =
- getDescriptor().getMessageTypes().get(30);
+ getDescriptor().getMessageTypes().get(32);
internal_static_SetBalancerRunningRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SetBalancerRunningRequest_descriptor,
@@ -21961,7 +23065,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest.Builder.class);
internal_static_SetBalancerRunningResponse_descriptor =
- getDescriptor().getMessageTypes().get(31);
+ getDescriptor().getMessageTypes().get(33);
internal_static_SetBalancerRunningResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SetBalancerRunningResponse_descriptor,
@@ -21969,7 +23073,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse.Builder.class);
internal_static_CatalogScanRequest_descriptor =
- getDescriptor().getMessageTypes().get(32);
+ getDescriptor().getMessageTypes().get(34);
internal_static_CatalogScanRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_CatalogScanRequest_descriptor,
@@ -21977,7 +23081,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanRequest.Builder.class);
internal_static_CatalogScanResponse_descriptor =
- getDescriptor().getMessageTypes().get(33);
+ getDescriptor().getMessageTypes().get(35);
internal_static_CatalogScanResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_CatalogScanResponse_descriptor,
@@ -21985,7 +23089,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse.Builder.class);
internal_static_EnableCatalogJanitorRequest_descriptor =
- getDescriptor().getMessageTypes().get(34);
+ getDescriptor().getMessageTypes().get(36);
internal_static_EnableCatalogJanitorRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_EnableCatalogJanitorRequest_descriptor,
@@ -21993,7 +23097,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest.Builder.class);
internal_static_EnableCatalogJanitorResponse_descriptor =
- getDescriptor().getMessageTypes().get(35);
+ getDescriptor().getMessageTypes().get(37);
internal_static_EnableCatalogJanitorResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_EnableCatalogJanitorResponse_descriptor,
@@ -22001,7 +23105,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse.Builder.class);
internal_static_IsCatalogJanitorEnabledRequest_descriptor =
- getDescriptor().getMessageTypes().get(36);
+ getDescriptor().getMessageTypes().get(38);
internal_static_IsCatalogJanitorEnabledRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_IsCatalogJanitorEnabledRequest_descriptor,
@@ -22009,7 +23113,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest.Builder.class);
internal_static_IsCatalogJanitorEnabledResponse_descriptor =
- getDescriptor().getMessageTypes().get(37);
+ getDescriptor().getMessageTypes().get(39);
internal_static_IsCatalogJanitorEnabledResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_IsCatalogJanitorEnabledResponse_descriptor,
@@ -22017,7 +23121,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse.Builder.class);
internal_static_TakeSnapshotRequest_descriptor =
- getDescriptor().getMessageTypes().get(38);
+ getDescriptor().getMessageTypes().get(40);
internal_static_TakeSnapshotRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_TakeSnapshotRequest_descriptor,
@@ -22025,7 +23129,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotRequest.Builder.class);
internal_static_TakeSnapshotResponse_descriptor =
- getDescriptor().getMessageTypes().get(39);
+ getDescriptor().getMessageTypes().get(41);
internal_static_TakeSnapshotResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_TakeSnapshotResponse_descriptor,
@@ -22033,7 +23137,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse.Builder.class);
internal_static_ListSnapshotRequest_descriptor =
- getDescriptor().getMessageTypes().get(40);
+ getDescriptor().getMessageTypes().get(42);
internal_static_ListSnapshotRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ListSnapshotRequest_descriptor,
@@ -22041,7 +23145,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotRequest.Builder.class);
internal_static_ListSnapshotResponse_descriptor =
- getDescriptor().getMessageTypes().get(41);
+ getDescriptor().getMessageTypes().get(43);
internal_static_ListSnapshotResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ListSnapshotResponse_descriptor,
@@ -22049,7 +23153,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListSnapshotResponse.Builder.class);
internal_static_DeleteSnapshotRequest_descriptor =
- getDescriptor().getMessageTypes().get(42);
+ getDescriptor().getMessageTypes().get(44);
internal_static_DeleteSnapshotRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DeleteSnapshotRequest_descriptor,
@@ -22057,7 +23161,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest.Builder.class);
internal_static_DeleteSnapshotResponse_descriptor =
- getDescriptor().getMessageTypes().get(43);
+ getDescriptor().getMessageTypes().get(45);
internal_static_DeleteSnapshotResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_DeleteSnapshotResponse_descriptor,
@@ -22065,7 +23169,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse.Builder.class);
internal_static_RestoreSnapshotRequest_descriptor =
- getDescriptor().getMessageTypes().get(44);
+ getDescriptor().getMessageTypes().get(46);
internal_static_RestoreSnapshotRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RestoreSnapshotRequest_descriptor,
@@ -22073,7 +23177,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest.Builder.class);
internal_static_RestoreSnapshotResponse_descriptor =
- getDescriptor().getMessageTypes().get(45);
+ getDescriptor().getMessageTypes().get(47);
internal_static_RestoreSnapshotResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RestoreSnapshotResponse_descriptor,
@@ -22081,7 +23185,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse.Builder.class);
internal_static_IsSnapshotDoneRequest_descriptor =
- getDescriptor().getMessageTypes().get(46);
+ getDescriptor().getMessageTypes().get(48);
internal_static_IsSnapshotDoneRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_IsSnapshotDoneRequest_descriptor,
@@ -22089,7 +23193,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest.Builder.class);
internal_static_IsSnapshotDoneResponse_descriptor =
- getDescriptor().getMessageTypes().get(47);
+ getDescriptor().getMessageTypes().get(49);
internal_static_IsSnapshotDoneResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_IsSnapshotDoneResponse_descriptor,
@@ -22097,7 +23201,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse.Builder.class);
internal_static_IsRestoreSnapshotDoneRequest_descriptor =
- getDescriptor().getMessageTypes().get(48);
+ getDescriptor().getMessageTypes().get(50);
internal_static_IsRestoreSnapshotDoneRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_IsRestoreSnapshotDoneRequest_descriptor,
@@ -22105,7 +23209,7 @@
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest.Builder.class);
internal_static_IsRestoreSnapshotDoneResponse_descriptor =
- getDescriptor().getMessageTypes().get(49);
+ getDescriptor().getMessageTypes().get(51);
internal_static_IsRestoreSnapshotDoneResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_IsRestoreSnapshotDoneResponse_descriptor,
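The message-type indices follow the same positional discipline as the method indices: DispatchMergingRegionsRequest and DispatchMergingRegionsResponse land at positions 8 and 9 of the file's message list, pushing AssignRegionRequest and everything after it up by two, which is why every getMessageTypes().get(n) in the assigner above is renumbered. A hedged check, again assuming only the standard descriptor accessors:

  // Sketch only, not part of the patch: confirm message-type positions
  // match the indices wired into the FieldAccessorTable setup above.
  import com.google.protobuf.Descriptors.FileDescriptor;
  import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;

  public class MessageIndexCheck {
    public static void main(String[] args) {
      FileDescriptor file = MasterAdminProtos.getDescriptor();
      System.out.println(file.getMessageTypes().get(8).getName());  // DispatchMergingRegionsRequest
      System.out.println(file.getMessageTypes().get(9).getName());  // DispatchMergingRegionsResponse
      System.out.println(file.getMessageTypes().get(10).getName()); // AssignRegionRequest (was 8)
    }
  }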
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java (revision 0)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java (revision 0)
@@ -0,0 +1,424 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.*;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Test the {@link RegionMergeTransaction} class against two HRegions (as
+ * opposed to a running cluster).
+ */
+@Category(SmallTests.class)
+public class TestRegionMergeTransaction {
+ private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private final Path testdir = TEST_UTIL.getDataTestDir(this.getClass()
+ .getName());
+ private HRegion region_a;
+ private HRegion region_b;
+ private HRegion region_c;
+ private HLog wal;
+ private FileSystem fs;
+ // Start rows of region_a, region_b and region_c
+ private static final byte[] STARTROW_A = new byte[] { 'a', 'a', 'a' };
+ private static final byte[] STARTROW_B = new byte[] { 'g', 'g', 'g' };
+ private static final byte[] STARTROW_C = new byte[] { 'w', 'w', 'w' };
+ private static final byte[] ENDROW = new byte[] { '{', '{', '{' };
+ private static final byte[] CF = HConstants.CATALOG_FAMILY;
+
+ @Before
+ public void setup() throws IOException {
+ this.fs = FileSystem.get(TEST_UTIL.getConfiguration());
+ this.fs.delete(this.testdir, true);
+ this.wal = HLogFactory.createHLog(fs, this.testdir, "logs",
+ TEST_UTIL.getConfiguration());
+ this.region_a = createRegion(this.testdir, this.wal, STARTROW_A, STARTROW_B);
+ this.region_b = createRegion(this.testdir, this.wal, STARTROW_B, STARTROW_C);
+ this.region_c = createRegion(this.testdir, this.wal, STARTROW_C, ENDROW);
+ assert region_a != null && region_b != null && region_c != null;
+ TEST_UTIL.getConfiguration().setBoolean("hbase.testing.nocluster", true);
+ }
+
+ @After
+ public void teardown() throws IOException {
+ for (HRegion region : new HRegion[] { region_a, region_b, region_c }) {
+ if (region == null) continue;
+ if (!region.isClosed()) region.close();
+ if (this.fs.exists(region.getRegionDir())
+ && !this.fs.delete(region.getRegionDir(), true)) {
+ throw new IOException("Failed deleting of " + region.getRegionDir());
+ }
+ }
+ if (this.wal != null)
+ this.wal.closeAndDelete();
+ this.fs.delete(this.testdir, true);
+ }
+
+ /**
+ * Test that a straightforward prepare works. Tries to merge {@link #region_a}
+ * and {@link #region_b}.
+ * @throws IOException
+ */
+ @Test
+ public void testPrepare() throws IOException {
+ prepareOnGoodRegions();
+ }
+
+ private RegionMergeTransaction prepareOnGoodRegions() throws IOException {
+ RegionMergeTransaction mt = new RegionMergeTransaction(region_a, region_b,
+ false);
+ RegionMergeTransaction spyMT = Mockito.spy(mt);
+ doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
+ region_a.getRegionName());
+ doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
+ region_b.getRegionName());
+ assertTrue(spyMT.prepare(null));
+ return spyMT;
+ }
+
+ /**
+ * Test merging the same region
+ */
+ @Test
+ public void testPrepareWithSameRegion() throws IOException {
+ RegionMergeTransaction mt = new RegionMergeTransaction(this.region_a,
+ this.region_a, true);
+ assertFalse("should not merge the same region even if it is forcible ",
+ mt.prepare(null));
+ }
+
+ /**
+ * Test merging two non-adjacent regions in a normal (non-forcible) merge
+ */
+ @Test
+ public void testPrepareWithRegionsNotAdjacent() throws IOException {
+ RegionMergeTransaction mt = new RegionMergeTransaction(this.region_a,
+ this.region_c, false);
+ assertFalse("should not merge two regions if they are adjacent except it is forcible",
+ mt.prepare(null));
+ }
+
+ /**
+ * Test merging two non-adjacent regions in a forcible merge
+ */
+ @Test
+ public void testPrepareWithRegionsNotAdjacentUnderCompulsory()
+ throws IOException {
+ RegionMergeTransaction mt = new RegionMergeTransaction(region_a, region_c,
+ true);
+ RegionMergeTransaction spyMT = Mockito.spy(mt);
+ doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
+ region_a.getRegionName());
+ doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
+ region_c.getRegionName());
+ assertTrue("Since focible is true, should merge two regions even if they are not adjacent",
+ spyMT.prepare(null));
+ }
+
+ /**
+ * Test that a region whose store still holds references is not mergeable
+ */
+ @Test
+ public void testPrepareWithRegionsWithReference() throws IOException {
+ HStore storeMock = Mockito.mock(HStore.class);
+ when(storeMock.hasReferences()).thenReturn(true);
+ when(storeMock.getFamily()).thenReturn(new HColumnDescriptor("cf"));
+ when(storeMock.close()).thenReturn(ImmutableList.<StoreFile>of());
+ this.region_a.stores.put(Bytes.toBytes(""), storeMock);
+ RegionMergeTransaction mt = new RegionMergeTransaction(this.region_a,
+ this.region_b, false);
+ assertFalse(
+ "a region should not be mergeable if it has instances of store file references",
+ mt.prepare(null));
+ }
+
+ @Test
+ public void testPrepareWithClosedRegion() throws IOException {
+ this.region_a.close();
+ RegionMergeTransaction mt = new RegionMergeTransaction(this.region_a,
+ this.region_b, false);
+ assertFalse(mt.prepare(null));
+ }
+
+ /**
+ * Test merging regions that themselves resulted from earlier merges and
+ * still have merge references in META
+ */
+ @Test
+ public void testPrepareWithRegionsWithMergeReference() throws IOException {
+ RegionMergeTransaction mt = new RegionMergeTransaction(region_a, region_b,
+ false);
+ RegionMergeTransaction spyMT = Mockito.spy(mt);
+ doReturn(true).when(spyMT).hasMergeQualifierInMeta(null,
+ region_a.getRegionName());
+ doReturn(true).when(spyMT).hasMergeQualifierInMeta(null,
+ region_b.getRegionName());
+ assertFalse(spyMT.prepare(null));
+ }
+
+ @Test
+ public void testWholesomeMerge() throws IOException {
+ final int rowCountOfRegionA = loadRegion(this.region_a, CF, true);
+ final int rowCountOfRegionB = loadRegion(this.region_b, CF, true);
+ assertTrue(rowCountOfRegionA > 0 && rowCountOfRegionB > 0);
+ assertEquals(rowCountOfRegionA, countRows(this.region_a));
+ assertEquals(rowCountOfRegionB, countRows(this.region_b));
+
+ // Start transaction.
+ RegionMergeTransaction mt = prepareOnGoodRegions();
+
+ // Run the execute. Look at what it returns.
+ Server mockServer = Mockito.mock(Server.class);
+ when(mockServer.getConfiguration())
+ .thenReturn(TEST_UTIL.getConfiguration());
+ HRegion mergedRegion = mt.execute(mockServer, null);
+ // Do some assertions about execution.
+ assertTrue(this.fs.exists(mt.getMergesDir()));
+ // Assert region_a and region_b are closed.
+ assertTrue(region_a.isClosed());
+ assertTrue(region_b.isClosed());
+
+ // Assert the merges dir is empty -- its content has been moved out to
+ // live under the merged region's dir.
+ assertEquals(0, this.fs.listStatus(mt.getMergesDir()).length);
+ // Check the merged region has the correct key span.
+ assertTrue(Bytes.equals(this.region_a.getStartKey(),
+ mergedRegion.getStartKey()));
+ assertTrue(Bytes.equals(this.region_b.getEndKey(),
+ mergedRegion.getEndKey()));
+ // Count rows; the merged region is already open.
+ try {
+ int mergedRegionRowCount = countRows(mergedRegion);
+ assertEquals((rowCountOfRegionA + rowCountOfRegionB),
+ mergedRegionRowCount);
+ } finally {
+ HRegion.closeHRegion(mergedRegion);
+ }
+ // Assert the write lock is no longer held on region_a and region_b
+ assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread());
+ assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread());
+ }
+
+ @Test
+ public void testRollback() throws IOException {
+ final int rowCountOfRegionA = loadRegion(this.region_a, CF, true);
+ final int rowCountOfRegionB = loadRegion(this.region_b, CF, true);
+ assertTrue(rowCountOfRegionA > 0 && rowCountOfRegionB > 0);
+ assertEquals(rowCountOfRegionA, countRows(this.region_a));
+ assertEquals(rowCountOfRegionB, countRows(this.region_b));
+
+ // Start transaction.
+ RegionMergeTransaction mt = prepareOnGoodRegions();
+
+ when(mt.createMergedRegionFromMerges(region_a, region_b,
+ mt.getMergedRegionInfo())).thenThrow(
+ new MockedFailedMergedRegionCreation());
+
+ // Run the execute. Look at what it returns.
+ boolean expectedException = false;
+ Server mockServer = Mockito.mock(Server.class);
+ when(mockServer.getConfiguration())
+ .thenReturn(TEST_UTIL.getConfiguration());
+ try {
+ mt.execute(mockServer, null);
+ } catch (MockedFailedMergedRegionCreation e) {
+ expectedException = true;
+ }
+ assertTrue(expectedException);
+ // Run rollback
+ assertTrue(mt.rollback(null, null));
+
+ // Assert I can scan region_a and region_b.
+ int rowCountOfRegionA2 = countRows(this.region_a);
+ assertEquals(rowCountOfRegionA, rowCountOfRegionA2);
+ int rowCountOfRegionB2 = countRows(this.region_b);
+ assertEquals(rowCountOfRegionB, rowCountOfRegionB2);
+
+ // Assert rollback cleaned up stuff in fs
+ assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir,
+ mt.getMergedRegionInfo())));
+
+ assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread());
+ assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread());
+
+ // Now retry the merge but do not throw an exception this time.
+ assertTrue(mt.prepare(null));
+ HRegion mergedRegion = mt.execute(mockServer, null);
+ // Count rows. The merged region is already open.
+ try {
+ int mergedRegionRowCount = countRows(mergedRegion);
+ assertEquals((rowCountOfRegionA + rowCountOfRegionB),
+ mergedRegionRowCount);
+ } finally {
+ HRegion.closeHRegion(mergedRegion);
+ }
+ // Assert the write lock is no longer held on region_a and region_b
+ assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread());
+ assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread());
+ }
+
+ @Test
+ public void testFailAfterPONR() throws IOException, KeeperException {
+ final int rowCountOfRegionA = loadRegion(this.region_a, CF, true);
+ final int rowCountOfRegionB = loadRegion(this.region_b, CF, true);
+ assertTrue(rowCountOfRegionA > 0 && rowCountOfRegionB > 0);
+ assertEquals(rowCountOfRegionA, countRows(this.region_a));
+ assertEquals(rowCountOfRegionB, countRows(this.region_b));
+
+ // Start transaction.
+ RegionMergeTransaction mt = prepareOnGoodRegions();
+ Mockito.doThrow(new MockedFailedMergedRegionOpen())
+ .when(mt)
+ .openMergedRegion((Server) Mockito.anyObject(),
+ (RegionServerServices) Mockito.anyObject(),
+ (HRegion) Mockito.anyObject());
+
+ // Run the execute. Look at what it returns.
+ boolean expectedException = false;
+ Server mockServer = Mockito.mock(Server.class);
+ when(mockServer.getConfiguration())
+ .thenReturn(TEST_UTIL.getConfiguration());
+ try {
+ mt.execute(mockServer, null);
+ } catch (MockedFailedMergedRegionOpen e) {
+ expectedException = true;
+ }
+ assertTrue(expectedException);
+ // Rollback returns false, signalling that the server must be restarted.
+ assertFalse(mt.rollback(null, null));
+ // Make sure the merged region is still in the filesystem and has not
+ // been removed; this is expected once we go past the point of no
+ // return.
+ Path tableDir = this.region_a.getTableDir();
+ Path mergedRegionDir = new Path(tableDir, mt.getMergedRegionInfo()
+ .getEncodedName());
+ assertTrue(TEST_UTIL.getTestFileSystem().exists(mergedRegionDir));
+ }
+
+ /**
+ * Exception used in this class only.
+ */
+ @SuppressWarnings("serial")
+ private class MockedFailedMergedRegionCreation extends IOException {
+ }
+
+ @SuppressWarnings("serial")
+ private class MockedFailedMergedRegionOpen extends IOException {
+ }
+
+ private HRegion createRegion(final Path testdir, final HLog wal,
+ final byte[] startrow, final byte[] endrow)
+ throws IOException {
+ // Make a region with start and end keys.
+ HTableDescriptor htd = new HTableDescriptor("table");
+ HColumnDescriptor hcd = new HColumnDescriptor(CF);
+ htd.addFamily(hcd);
+ HRegionInfo hri = new HRegionInfo(htd.getName(), startrow, endrow);
+ HRegion a = HRegion.createHRegion(hri, testdir,
+ TEST_UTIL.getConfiguration(), htd);
+ HRegion.closeHRegion(a);
+ return HRegion.openHRegion(testdir, hri, htd, wal,
+ TEST_UTIL.getConfiguration());
+ }
+
+ private int countRows(final HRegion r) throws IOException {
+ int rowcount = 0;
+ InternalScanner scanner = r.getScanner(new Scan());
+ try {
+ List<KeyValue> kvs = new ArrayList<KeyValue>();
+ boolean hasNext = true;
+ while (hasNext) {
+ hasNext = scanner.next(kvs);
+ if (!kvs.isEmpty())
+ rowcount++;
+ }
+ } finally {
+ scanner.close();
+ }
+ return rowcount;
+ }
+
+ /**
+ * Load the region with rows from 'aaa' to 'zzz', skipping rows that fall
+ * outside the region's range
+ * @param r Region
+ * @param f Family
+ * @param flush flush the cache if true
+ * @return Count of rows loaded.
+ * @throws IOException
+ */
+ private int loadRegion(final HRegion r, final byte[] f, final boolean flush)
+ throws IOException {
+ byte[] k = new byte[3];
+ int rowCount = 0;
+ for (byte b1 = 'a'; b1 <= 'z'; b1++) {
+ for (byte b2 = 'a'; b2 <= 'z'; b2++) {
+ for (byte b3 = 'a'; b3 <= 'z'; b3++) {
+ k[0] = b1;
+ k[1] = b2;
+ k[2] = b3;
+ if (!HRegion.rowIsInRange(r.getRegionInfo(), k)) {
+ continue;
+ }
+ Put put = new Put(k);
+ put.add(f, null, k);
+ if (r.getLog() == null)
+ put.setWriteToWAL(false);
+ r.put(put);
+ rowCount++;
+ }
+ }
+ if (flush) {
+ r.flushcache();
+ }
+ }
+ return rowCount;
+ }
+
+}
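
A minimal sketch of the transaction lifecycle the tests above exercise (illustrative only; as in the tests, a mocked Server stands in for a real one and the RegionServerServices argument may be null):

    RegionMergeTransaction mt = new RegionMergeTransaction(region_a, region_b, false);
    if (mt.prepare(null)) {
      try {
        // Closes region_a and region_b and creates the merged region.
        HRegion merged = mt.execute(mockServer, null);
        HRegion.closeHRegion(merged);
      } catch (IOException ioe) {
        // Before the PONR journal entry, rollback restores both regions;
        // a false return means the caller must abort the server.
        if (!mt.rollback(null, null)) {
          throw ioe;
        }
      }
    }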
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (revision 1460277)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (working copy)
@@ -516,8 +516,9 @@
void openDaughterRegion(final Server server, final HRegion daughter)
throws IOException, KeeperException {
HRegionInfo hri = daughter.getRegionInfo();
- LoggingProgressable reporter = server == null? null:
- new LoggingProgressable(hri, server.getConfiguration());
+ LoggingProgressable reporter = server == null ? null
+ : new LoggingProgressable(hri, server.getConfiguration().getLong(
+ "hbase.regionserver.split.daughter.open.log.interval", 10000));
daughter.openHRegion(reporter);
}
@@ -526,10 +527,9 @@
private long lastLog = -1;
private final long interval;
- LoggingProgressable(final HRegionInfo hri, final Configuration c) {
+ LoggingProgressable(final HRegionInfo hri, final long interval) {
this.hri = hri;
- this.interval = c.getLong("hbase.regionserver.split.daughter.open.log.interval",
- 10000);
+ this.interval = interval;
}
@Override
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java (revision 1460302)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java (working copy)
@@ -22,6 +22,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableDescriptors;
@@ -171,4 +172,16 @@
*/
public boolean registerService(Service instance);
+ /**
+ * Merge two regions. The real implementation lives on the regionserver; the
+ * master just moves the regions together and sends the MERGE RPC to the
+ * regionserver
+ * @param region_a first region to merge
+ * @param region_b second region to merge
+ * @param forcible true for a compulsory merge; otherwise only two adjacent
+ * regions will be merged
+ * @throws IOException
+ */
+ public void dispatchMergingRegions(final HRegionInfo region_a,
+ final HRegionInfo region_b, final boolean forcible) throws IOException;
+
}
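
A caller on the master side would use the new interface method roughly as follows (a sketch; the surrounding method and names are hypothetical):

    void requestMerge(MasterServices services, HRegionInfo regionA,
        HRegionInfo regionB) throws IOException {
      // Non-forcible: the master will only merge adjacent regions.
      services.dispatchMergingRegions(regionA, regionB, false);
    }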
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java (revision 0)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java (revision 0)
@@ -0,0 +1,112 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.util.StringUtils;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Handles processing region merges. Put in a queue, owned by HRegionServer.
+ */
+@InterfaceAudience.Private
+class RegionMergeRequest implements Runnable {
+ static final Log LOG = LogFactory.getLog(RegionMergeRequest.class);
+ private final HRegion region_a;
+ private final HRegion region_b;
+ private final HRegionServer server;
+ private final boolean forcible;
+
+ RegionMergeRequest(HRegion a, HRegion b, HRegionServer hrs, boolean forcible) {
+ Preconditions.checkNotNull(hrs);
+ this.region_a = a;
+ this.region_b = b;
+ this.server = hrs;
+ this.forcible = forcible;
+ }
+
+ @Override
+ public String toString() {
+ return "MergeRequest,regions:" + region_a + ", " + region_b + ", forcible="
+ + forcible;
+ }
+
+ @Override
+ public void run() {
+ if (this.server.isStopping() || this.server.isStopped()) {
+ LOG.debug("Skipping merge because server is stopping="
+ + this.server.isStopping() + " or stopped=" + this.server.isStopped());
+ return;
+ }
+ try {
+ final long startTime = EnvironmentEdgeManager.currentTimeMillis();
+ RegionMergeTransaction mt = new RegionMergeTransaction(region_a,
+ region_b, forcible);
+ // If prepare does not return true, for some reason -- logged inside
+ // the prepare call -- we are not ready to merge just now. Just return.
+ if (!mt.prepare(this.server)) return;
+ try {
+ mt.execute(this.server, this.server);
+ } catch (Exception e) {
+ if (this.server.isStopping() || this.server.isStopped()) {
+ LOG.info(
+ "Skip rollback/cleanup of failed merge of " + region_a + " and "
+ + region_b + " because server is"
+ + (this.server.isStopping() ? " stopping" : " stopped"), e);
+ return;
+ }
+ try {
+ LOG.warn("Running rollback/cleanup of failed merge of "
+ + region_a +" and "+ region_b + "; " + e.getMessage(), e);
+ if (mt.rollback(this.server, this.server)) {
+ LOG.info("Successful rollback of failed merge of "
+ + region_a +" and "+ region_b);
+ } else {
+ this.server.abort("Abort; we got an error after point-of-no-return"
+ + "when merging " + region_a + " and " + region_b);
+ }
+ } catch (RuntimeException ee) {
+ String msg = "Failed rollback of failed merge of "
+ + region_a +" and "+ region_b + " -- aborting server";
+ // If failed rollback, kill this server to avoid having a hole in
+ // table.
+ LOG.info(msg, ee);
+ this.server.abort(msg);
+ }
+ return;
+ }
+ LOG.info("Regions merged, META updated, and report to master. region_a="
+ + region_a + ", region_b=" + region_b + ",merged region="
+ + mt.getMergedRegionInfo().getRegionNameAsString()
+ + ". Region merge took "
+ + StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTimeMillis(), startTime));
+ } catch (IOException ex) {
+ LOG.error("Merge failed " + this,
+ RemoteExceptionHandler.checkIOException(ex));
+ server.checkFileSystem();
+ }
+ }
+}
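
Since the request is a plain Runnable, the owning regionserver can queue it on any executor; a minimal sketch of such wiring (the pool and helper method below are assumptions, not part of this patch):

    // Hypothetical wiring: hand the merge off to a worker pool owned by the RS.
    void queueMerge(java.util.concurrent.ExecutorService pool, HRegion a,
        HRegion b, HRegionServer rs) {
      pool.execute(new RegionMergeRequest(a, b, rs, false));
    }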
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java (revision 1460277)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java (working copy)
@@ -234,6 +234,38 @@
}
/**
+ * Gets the result in META for the specified region.
+ * @param catalogTracker
+ * @param regionName
+ * @return result of the specified region
+ * @throws IOException
+ */
+ public static Result getRegionResult(CatalogTracker catalogTracker,
+ byte[] regionName) throws IOException {
+ Get get = new Get(regionName);
+ get.addFamily(HConstants.CATALOG_FAMILY);
+ return get(getCatalogHTable(catalogTracker), get);
+ }
+
+ /**
+ * Get the merging regions recorded in the merge qualifiers of the specified
+ * merged region
+ * @return null if the region carries no merge qualifier, else the two
+ * merging regions
+ * @throws IOException
+ */
+ public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
+ CatalogTracker catalogTracker, byte[] regionName) throws IOException {
+ Result result = getRegionResult(catalogTracker, regionName);
+ HRegionInfo mergeA = HRegionInfo.getHRegionInfo(result,
+ HConstants.MERGEA_QUALIFIER);
+ HRegionInfo mergeB = HRegionInfo.getHRegionInfo(result,
+ HConstants.MERGEB_QUALIFIER);
+ if (mergeA == null && mergeB == null) {
+ return null;
+ }
+ return new Pair<HRegionInfo, HRegionInfo>(mergeA, mergeB);
+ }
+
+ /**
* Checks if the specified table exists. Looks at the META table hosted on
* the specified server.
* @param catalogTracker
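
A consumer can use the new helper to detect whether a merged region still references its parents, e.g. (sketch; the variable names are placeholders):

    Pair<HRegionInfo, HRegionInfo> parents =
        MetaReader.getRegionsFromMergeQualifier(catalogTracker, regionName);
    if (parents != null) {
      // Merge qualifiers still present: CatalogJanitor has not cleaned them yet.
      HRegionInfo mergeA = parents.getFirst();
      HRegionInfo mergeB = parents.getSecond();
    }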
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java (revision 1460277)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java (working copy)
@@ -85,6 +85,7 @@
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest;
@@ -1426,6 +1427,28 @@
}
}
+ /**
+ * A helper to merge regions using the admin protocol. Sends the request to
+ * the regionserver.
+ * @param admin admin protocol of the target regionserver
+ * @param region_a first region to merge
+ * @param region_b second region to merge
+ * @param forcible true for a compulsory merge; otherwise only two adjacent
+ * regions will be merged
+ * @throws IOException
+ */
+ public static void mergeRegions(final AdminProtocol admin,
+ final HRegionInfo region_a, final HRegionInfo region_b,
+ final boolean forcible) throws IOException {
+ MergeRegionsRequest request = RequestConverter.buildMergeRegionsRequest(
+ region_a.getRegionName(), region_b.getRegionName(), forcible);
+ try {
+ admin.mergeRegions(null, request);
+ } catch (ServiceException se) {
+ throw ProtobufUtil.getRemoteException(se);
+ }
+ }
+
// End helpers for Admin
/*
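
Client code that already holds an AdminProtocol for the hosting regionserver can then trigger the merge directly (sketch; adminProtocol and the two HRegionInfo values are placeholders):

    // Sends the MERGE RPC to the regionserver hosting both regions.
    ProtobufUtil.mergeRegions(adminProtocol, regionInfoA, regionInfoB, false);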
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java (revision 0)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java (revision 0)
@@ -0,0 +1,297 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.base.Joiner;
+
+/**
+ * Like {@link TestRegionMergeTransaction} in that we're testing
+ * {@link RegionMergeTransaction}, only the tests below run against a live
+ * cluster, whereas {@link TestRegionMergeTransaction} tests against a bare
+ * {@link HRegion}.
+ */
+@Category(LargeTests.class)
+public class TestRegionMergeTransactionOnCluster {
+ private static final Log LOG = LogFactory
+ .getLog(TestRegionMergeTransactionOnCluster.class);
+ private static final int NB_SERVERS = 3;
+
+ private static final byte[] FAMILYNAME = Bytes.toBytes("fam");
+ private static final byte[] QUALIFIER = Bytes.toBytes("q");
+
+ private static byte[] ROW = Bytes.toBytes("testRow");
+ private static final int INITIAL_REGION_NUM = 10;
+ private static final int ROWSIZE = 200;
+ private static byte[][] ROWS = makeN(ROW, ROWSIZE);
+
+ private static int waitTime = 60 * 1000;
+
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+ private static HMaster master;
+ private static HBaseAdmin admin;
+
+ @BeforeClass
+ public static void beforeAllTests() throws Exception {
+ // Start a cluster
+ TEST_UTIL.startMiniCluster(NB_SERVERS);
+ MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+ master = cluster.getMaster();
+ master.balanceSwitch(false);
+ admin = TEST_UTIL.getHBaseAdmin();
+ }
+
+ @AfterClass
+ public static void afterAllTests() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testWholesomeMerge() throws Exception {
+ LOG.info("Starting testWholesomeMerge");
+ final byte[] tableName = Bytes.toBytes("testWholesomeMerge");
+
+ // Create table and load data.
+ HTable table = createTableAndLoadData(master, tableName);
+ // Merge 1st and 2nd region
+ mergeRegionsAndVerifyRegionNum(master, tableName, 0, 1,
+ INITIAL_REGION_NUM - 1);
+
+ // Merge 2nd and 3rd region
+ mergeRegionsAndVerifyRegionNum(master, tableName, 1, 2,
+ INITIAL_REGION_NUM - 2);
+
+ verifyRowCount(table, ROWSIZE);
+
+ table.close();
+
+ }
+
+ @Test
+ public void testCleanMergeReference() throws Exception {
+ LOG.info("Starting testCleanMergeReference");
+ admin.enableCatalogJanitor(false);
+ try {
+ final byte[] tableName = Bytes.toBytes("testCleanMergeReference");
+ // Create table and load data.
+ HTable table = createTableAndLoadData(master, tableName);
+ // Merge 1st and 2nd region
+ mergeRegionsAndVerifyRegionNum(master, tableName, 0, 1,
+ INITIAL_REGION_NUM - 1);
+ verifyRowCount(table, ROWSIZE);
+ table.close();
+
+ List<Pair<HRegionInfo, ServerName>> tableRegions = MetaReader
+ .getTableRegionsAndLocations(master.getCatalogTracker(),
+ Bytes.toString(tableName));
+ HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
+ HTableDescriptor tableDescriptor = master.getTableDescriptors().get(
+ Bytes.toString(tableName));
+ Result mergedRegionResult = MetaReader.getRegionResult(
+ master.getCatalogTracker(), mergedRegionInfo.getRegionName());
+
+ // contains merge reference in META
+ assertTrue(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
+ HConstants.MERGEA_QUALIFIER) != null);
+ assertTrue(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
+ HConstants.MERGEB_QUALIFIER) != null);
+
+ // the merging regions' directories still exist in the file system
+ HRegionInfo regionA = HRegionInfo.getHRegionInfo(mergedRegionResult,
+ HConstants.MERGEA_QUALIFIER);
+ HRegionInfo regionB = HRegionInfo.getHRegionInfo(mergedRegionResult,
+ HConstants.MERGEB_QUALIFIER);
+ FileSystem fs = master.getMasterFileSystem().getFileSystem();
+ Path rootDir = master.getMasterFileSystem().getRootDir();
+
+ Path tabledir = new Path(rootDir, mergedRegionInfo.getTableNameAsString());
+ Path regionAdir = new Path(tabledir, regionA.getEncodedName());
+ Path regionBdir = new Path(tabledir, regionB.getEncodedName());
+ assertTrue(fs.exists(regionAdir));
+ assertTrue(fs.exists(regionBdir));
+
+ admin.compact(mergedRegionInfo.getRegionName());
+ // wait until the merged region no longer has reference files
+ long timeout = System.currentTimeMillis() + waitTime;
+ while (System.currentTimeMillis() < timeout) {
+ if (!HRegion.hasReferences(fs, rootDir, mergedRegionInfo,
+ tableDescriptor)) {
+ break;
+ }
+ Thread.sleep(50);
+ }
+ assertFalse(HRegion.hasReferences(fs, rootDir, mergedRegionInfo,
+ tableDescriptor));
+
+ // run CatalogJanitor to clean merge references in META and archive the
+ // files of merging regions
+ int cleaned = admin.runCatalogScan();
+ assertTrue(cleaned > 0);
+ assertFalse(fs.exists(regionAdir));
+ assertFalse(fs.exists(regionBdir));
+
+ mergedRegionResult = MetaReader.getRegionResult(
+ master.getCatalogTracker(), mergedRegionInfo.getRegionName());
+ assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
+ HConstants.MERGEA_QUALIFIER) != null);
+ assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
+ HConstants.MERGEB_QUALIFIER) != null);
+
+ } finally {
+ admin.enableCatalogJanitor(true);
+ }
+
+ }
+
+ private void mergeRegionsAndVerifyRegionNum(HMaster master, byte[] tablename,
+ int regionAnum, int regionBnum, int expectedRegionNum) throws Exception {
+ requestMergeRegion(master, tablename, regionAnum, regionBnum);
+ waitAndVerifyRegionNum(master, tablename, expectedRegionNum);
+ }
+
+ private void requestMergeRegion(HMaster master, byte[] tablename,
+ int regionAnum, int regionBnum) throws Exception {
+ List<Pair<HRegionInfo, ServerName>> tableRegions = MetaReader
+ .getTableRegionsAndLocations(master.getCatalogTracker(),
+ Bytes.toString(tablename));
+ TEST_UTIL.getHBaseAdmin().mergeRegions(
+ tableRegions.get(regionAnum).getFirst().getEncodedNameAsBytes(),
+ tableRegions.get(regionBnum).getFirst().getEncodedNameAsBytes(), false);
+ }
+
+ private void waitAndVerifyRegionNum(HMaster master, byte[] tablename,
+ int expectedRegionNum) throws Exception {
+ List<Pair<HRegionInfo, ServerName>> tableRegionsInMeta;
+ List<HRegionInfo> tableRegionsInMaster;
+ long timeout = System.currentTimeMillis() + waitTime;
+ while (System.currentTimeMillis() < timeout) {
+ tableRegionsInMeta = MetaReader.getTableRegionsAndLocations(
+ master.getCatalogTracker(), Bytes.toString(tablename));
+ tableRegionsInMaster = master.getAssignmentManager().getRegionStates()
+ .getRegionsOfTable(tablename);
+ if (tableRegionsInMeta.size() == expectedRegionNum
+ && tableRegionsInMaster.size() == expectedRegionNum) {
+ break;
+ }
+ Thread.sleep(250);
+ }
+
+ tableRegionsInMeta = MetaReader.getTableRegionsAndLocations(
+ master.getCatalogTracker(), Bytes.toString(tablename));
+ LOG.info("Regions after merge:" + Joiner.on(',').join(tableRegionsInMeta));
+ assertEquals(expectedRegionNum, tableRegionsInMeta.size());
+ }
+
+ private HTable createTableAndLoadData(HMaster master, byte[] tablename)
+ throws Exception {
+ return createTableAndLoadData(master, tablename, INITIAL_REGION_NUM);
+ }
+
+ private HTable createTableAndLoadData(HMaster master, byte[] tablename,
+ int numRegions) throws Exception {
+ assertTrue("ROWSIZE must > numregions:" + numRegions, ROWSIZE > numRegions);
+ byte[][] splitRows = new byte[numRegions - 1][];
+ for (int i = 0; i < splitRows.length; i++) {
+ splitRows[i] = ROWS[(i + 1) * ROWSIZE / numRegions];
+ }
+
+ HTable table = TEST_UTIL.createTable(tablename, FAMILYNAME, splitRows);
+ loadData(table);
+ verifyRowCount(table, ROWSIZE);
+
+ // sleep here is an ugly hack to allow region transitions to finish
+ long timeout = System.currentTimeMillis() + waitTime;
+ List<Pair<HRegionInfo, ServerName>> tableRegions;
+ while (System.currentTimeMillis() < timeout) {
+ tableRegions = MetaReader.getTableRegionsAndLocations(
+ master.getCatalogTracker(), Bytes.toString(tablename));
+ if (tableRegions.size() == numRegions)
+ break;
+ Thread.sleep(250);
+ }
+
+ tableRegions = MetaReader.getTableRegionsAndLocations(
+ master.getCatalogTracker(), Bytes.toString(tablename));
+ LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
+ assertEquals(numRegions, tableRegions.size());
+ return table;
+ }
+
+ private static byte[][] makeN(byte[] base, int n) {
+ byte[][] ret = new byte[n][];
+ for (int i = 0; i < n; i++) {
+ ret[i] = Bytes.add(base, Bytes.toBytes(String.format("%04d", i)));
+ }
+ return ret;
+ }
+
+ private void loadData(HTable table) throws IOException {
+ for (int i = 0; i < ROWSIZE; i++) {
+ Put put = new Put(ROWS[i]);
+ put.add(FAMILYNAME, QUALIFIER, Bytes.toBytes(i));
+ table.put(put);
+ }
+ }
+
+ private void verifyRowCount(HTable table, int expectedRowCount)
+ throws IOException {
+ ResultScanner scanner = table.getScanner(new Scan());
+ int rowCount = 0;
+ while (scanner.next() != null) {
+ rowCount++;
+ }
+ assertEquals(expectedRowCount, rowCount);
+ scanner.close();
+ }
+
+}
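
The client-facing entry point the test drives is HBaseAdmin#mergeRegions (added elsewhere in this patch); an application would invoke it roughly like this (sketch, passing encoded region names as in requestMergeRegion above):

    HBaseAdmin admin = new HBaseAdmin(conf);
    // false = non-forcible: only adjacent regions may be merged.
    admin.mergeRegions(encodedNameOfRegionA, encodedNameOfRegionB, false);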
Index: hbase-protocol/src/main/protobuf/Admin.proto
===================================================================
--- hbase-protocol/src/main/protobuf/Admin.proto (revision 1460277)
+++ hbase-protocol/src/main/protobuf/Admin.proto (working copy)
@@ -141,6 +141,20 @@
message CompactRegionResponse {
}
+/**
+ * Merges the specified regions.
+ *
+ * This method currently closes the regions and then merges them.
+ */
+message MergeRegionsRequest {
+ required RegionSpecifier regionA = 1;
+ required RegionSpecifier regionB = 2;
+ optional bool forcible = 3 [default = false];
+}
+
+message MergeRegionsResponse {
+}
+
message UUID {
required uint64 leastSigBits = 1;
required uint64 mostSigBits = 2;
@@ -240,6 +254,9 @@
rpc compactRegion(CompactRegionRequest)
returns(CompactRegionResponse);
+
+ rpc mergeRegions(MergeRegionsRequest)
+ returns(MergeRegionsResponse);
rpc replicateWALEntry(ReplicateWALEntryRequest)
returns(ReplicateWALEntryResponse);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java (revision 1460277)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java (working copy)
@@ -102,4 +102,9 @@
* @return hbase executor service
*/
public ExecutorService getExecutorService();
+
+ /**
+ * @return The RegionServer's CatalogTracker
+ */
+ public CatalogTracker getCatalogTracker();
}
Index: hbase-protocol/src/main/protobuf/MasterAdmin.proto
===================================================================
--- hbase-protocol/src/main/protobuf/MasterAdmin.proto (revision 1460277)
+++ hbase-protocol/src/main/protobuf/MasterAdmin.proto (working copy)
@@ -63,6 +63,18 @@
message MoveRegionResponse {
}
+/**
+ * Dispatch merging the specified regions.
+ */
+message DispatchMergingRegionsRequest {
+ required RegionSpecifier regionA = 1;
+ required RegionSpecifier regionB = 2;
+ optional bool forcible = 3 [default = false];
+}
+
+message DispatchMergingRegionsResponse {
+}
+
message AssignRegionRequest {
required RegionSpecifier region = 1;
}
@@ -243,6 +255,10 @@
rpc moveRegion(MoveRegionRequest)
returns(MoveRegionResponse);
+ /** Master dispatches merging of the regions */
+ rpc dispatchMergingRegions(DispatchMergingRegionsRequest)
+ returns(DispatchMergingRegionsResponse);
+
/** Assign a region to a server chosen at random. */
rpc assignRegion(AssignRegionRequest)
returns(AssignRegionResponse);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 1460277)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy)
@@ -19,9 +19,8 @@
package org.apache.hadoop.hbase.regionserver;
import java.io.EOFException;
+import java.io.FileNotFoundException;
import java.io.IOException;
-import java.io.FileNotFoundException;
-import org.apache.hadoop.fs.permission.FsPermission;
import java.io.InterruptedIOException;
import java.io.UnsupportedEncodingException;
import java.lang.reflect.Constructor;
@@ -70,6 +69,8 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -120,7 +121,6 @@
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl.WriteEntry;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
@@ -636,6 +636,8 @@
// these directories here on open. We may be opening a region that was
// being split but we crashed in the middle of it all.
SplitTransaction.cleanupAnySplitDetritus(this);
+ RegionMergeTransaction.cleanupMergeDir(this.getFilesystem(),
+ RegionMergeTransaction.getMergeDir(this));
FSUtils.deleteDirectory(this.fs, new Path(regiondir, MERGEDIR));
this.writestate.setReadOnly(this.htableDescriptor.isReadOnly());
@@ -919,6 +921,24 @@
return isAvailable() && !hasReferences();
}
+ /**
+ * @return true if region is mergeable
+ */
+ public boolean isMergeable() {
+ if (!isAvailable()) {
+ LOG.debug("Region " + this.getRegionNameAsString()
+ + " is not mergeable because it is closing or closed");
+ return false;
+ }
+ if (hasReferences()) {
+ LOG.debug("Region " + this.getRegionNameAsString()
+ + " is not mergeable because it has references");
+ return false;
+ }
+
+ return true;
+ }
+
public boolean areWritesEnabled() {
synchronized(this.writestate) {
return this.writestate.writesEnabled;
@@ -4230,6 +4250,33 @@
}
/**
+ * Create a merged region given a temp directory with the region data.
+ * @param mergedRegionInfo info of the merged region
+ * @param region_b another merging region
+ * @param mergedTmpDir Directory that contains region files.
+ * @return merged hregion
+ * @throws IOException
+ */
+ HRegion createMergedRegionFromMerges(final HRegionInfo mergedRegionInfo,
+ final HRegion region_b, final Path mergedTmpDir) throws IOException {
+ HRegion r = HRegion.newHRegion(this.getTableDir(), this.getLog(), fs,
+ this.getBaseConf(), mergedRegionInfo, this.getTableDesc(),
+ this.rsServices);
+ r.readRequestsCount.set(this.getReadRequestsCount()
+ + region_b.getReadRequestsCount());
+ r.writeRequestsCount.set(this.getWriteRequestsCount()
+ + region_b.getWriteRequestsCount());
+ // Move the tmp dir to the expected location
+ if (mergedTmpDir != null && fs.exists(mergedTmpDir)) {
+ if (!fs.rename(mergedTmpDir, r.getRegionDir())) {
+ throw new IOException("Unable to rename " + mergedTmpDir + " to "
+ + r.getRegionDir());
+ }
+ }
+ return r;
+ }
+
+ /**
* Inserts a new region's meta information into the passed
* meta region. Used by the HMaster bootstrap code adding
* new table to META table.
@@ -4508,6 +4555,41 @@
}
/**
+ * Check whether the region has any reference files
+ * @param fs
+ * @param rootDir
+ * @param region
+ * @param htd
+ * @return true if the region has at least one reference file
+ * @throws IOException
+ */
+ public static boolean hasReferences(final FileSystem fs,
+ final Path rootDir, final HRegionInfo region, final HTableDescriptor htd)
+ throws IOException {
+ Path tabledir = new Path(rootDir, region.getTableNameAsString());
+ boolean hasReference = false;
+ for (HColumnDescriptor family : htd.getFamilies()) {
+ Path p = HStore.getStoreHomedir(tabledir, region.getEncodedName(),
+ family.getName());
+ if (!fs.exists(p))
+ continue;
+ // Look for reference files. Call listStatus with anonymous instance of
+ // PathFilter.
+ FileStatus[] ps = FSUtils.listStatus(fs, p, new PathFilter() {
+ public boolean accept(Path path) {
+ return StoreFile.isReference(path);
+ }
+ });
+
+ if (ps != null && ps.length > 0) {
+ hasReference = true;
+ break;
+ }
+ }
+ return hasReference;
+ }
+
+ /**
* @return True if needs a major compaction.
* @throws IOException
*/
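
The new hasReferences check is what the cluster test polls after requesting a compaction; that polling pattern looks like this (a sketch, mirroring testCleanMergeReference):

    // Wait, bounded by a timeout, until compaction has rewritten all
    // reference files of the merged region.
    long deadline = System.currentTimeMillis() + waitTime;
    while (System.currentTimeMillis() < deadline
        && HRegion.hasReferences(fs, rootDir, mergedRegionInfo, tableDescriptor)) {
      Thread.sleep(50);
    }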
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java (revision 1460277)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java (working copy)
@@ -47,6 +47,7 @@
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
@@ -78,6 +79,7 @@
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest;
@@ -777,6 +779,26 @@
return builder.build();
}
+ /**
+ * Create a MergeRegionsRequest for the given regions
+ * @param regionA name of region a
+ * @param regionB name of region b
+ * @param forcible true if it is a compulsory merge
+ * @return a MergeRegionsRequest
+ */
+ public static MergeRegionsRequest buildMergeRegionsRequest(
+ final byte[] regionA, final byte[] regionB, final boolean forcible) {
+ MergeRegionsRequest.Builder builder = MergeRegionsRequest.newBuilder();
+ RegionSpecifier regionASpecifier = buildRegionSpecifier(
+ RegionSpecifierType.REGION_NAME, regionA);
+ RegionSpecifier regionBSpecifier = buildRegionSpecifier(
+ RegionSpecifierType.REGION_NAME, regionB);
+ builder.setRegionA(regionASpecifier);
+ builder.setRegionB(regionBSpecifier);
+ builder.setForcible(forcible);
+ return builder.build();
+ }
+
/**
* Create a CompactRegionRequest for a given region name
*
@@ -936,6 +958,18 @@
return builder.build();
}
+ public static DispatchMergingRegionsRequest buildDispatchMergingRegionsRequest(
+ final byte[] encodedNameOfRegionA, final byte[] encodedNameOfRegionB,
+ final boolean forcible) throws DeserializationException {
+ DispatchMergingRegionsRequest.Builder builder = DispatchMergingRegionsRequest.newBuilder();
+ builder.setRegionA(buildRegionSpecifier(
+ RegionSpecifierType.ENCODED_REGION_NAME, encodedNameOfRegionA));
+ builder.setRegionB(buildRegionSpecifier(
+ RegionSpecifierType.ENCODED_REGION_NAME, encodedNameOfRegionB));
+ builder.setForcible(forcible);
+ return builder.build();
+ }
+
/**
* Create a protocol buffer AssignRegionRequest
*
Index: hbase-client/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
===================================================================
--- hbase-client/src/main/java/org/apache/hadoop/hbase/executor/EventType.java (revision 1460277)
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/executor/EventType.java (working copy)
@@ -43,6 +43,8 @@
RS_ZK_REGION_SPLITTING (5, null), // RS has started a region split
RS_ZK_REGION_SPLIT (6, ExecutorType.MASTER_SERVER_OPERATIONS), // RS split has completed.
RS_ZK_REGION_FAILED_OPEN (7, ExecutorType.MASTER_CLOSE_REGION), // RS failed to open a region
+ RS_ZK_REGION_MERGING (8, null), // RS has started merging regions
+ RS_ZK_REGION_MERGE (9, ExecutorType.MASTER_SERVER_OPERATIONS), // RS region merge has completed.
// Messages originating from Master to RS
M_RS_OPEN_REGION (20, ExecutorType.RS_OPEN_REGION), // Master asking RS to open a region
@@ -53,6 +55,7 @@
M_RS_CLOSE_META (25, ExecutorType.RS_CLOSE_META), // Master asking RS to close meta
// Messages originating from Client to Master
+ C_M_MERGE_REGION (30, ExecutorType.MASTER_TABLE_OPERATIONS), // Client asking Master to merge regions
C_M_DELETE_TABLE (40, ExecutorType.MASTER_TABLE_OPERATIONS), // Client asking Master to delete a table
C_M_DISABLE_TABLE (41, ExecutorType.MASTER_TABLE_OPERATIONS), // Client asking Master to disable a table
C_M_ENABLE_TABLE (42, ExecutorType.MASTER_TABLE_OPERATIONS), // Client asking Master to enable a table
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java (revision 1460277)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java (working copy)
@@ -29,13 +29,13 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.client.AdminProtocol;
import org.apache.hadoop.hbase.client.ClientProtocol;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.master.TableLockManager.NullTableLockManager;
@@ -54,6 +54,8 @@
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
@@ -454,6 +456,13 @@
}
@Override
+ public MergeRegionsResponse mergeRegions(RpcController controller,
+ MergeRegionsRequest request) throws ServiceException {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ @Override
public CompactRegionResponse compactRegion(RpcController controller,
CompactRegionRequest request) throws ServiceException {
// TODO Auto-generated method stub
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java (revision 0)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DispatchMergingRegionHandler.java (revision 0)
@@ -0,0 +1,164 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.master.handler;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.executor.EventHandler;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.master.CatalogJanitor;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.RegionStates;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * Handles a MERGE regions request on the master: moves the regions together
+ * (onto the same regionserver) and sends the MERGE RPC to that regionserver.
+ *
+ * NOTE: The real merge is executed on the regionserver.
+ *
+ */
+@InterfaceAudience.Private
+public class DispatchMergingRegionHandler extends EventHandler {
+ private static final Log LOG = LogFactory.getLog(DispatchMergingRegionHandler.class);
+ private final MasterServices masterServices;
+ private final CatalogJanitor catalogJanitor;
+ private HRegionInfo region_a;
+ private HRegionInfo region_b;
+ private final boolean forcible;
+ private final int timeout;
+
+ public DispatchMergingRegionHandler(final MasterServices services,
+ final CatalogJanitor catalogJanitor, final HRegionInfo region_a,
+ final HRegionInfo region_b, final boolean forcible) {
+ super(services, EventType.C_M_MERGE_REGION);
+ this.masterServices = services;
+ this.catalogJanitor = catalogJanitor;
+ this.region_a = region_a;
+ this.region_b = region_b;
+ this.forcible = forcible;
+ this.timeout = server.getConfiguration().getInt(
+ "hbase.master.regionmerge.timeout", 30 * 1000);
+ }
+
+ @Override
+ public void process() throws IOException {
+ boolean regionAHasMergeQualifier = !catalogJanitor.cleanMergeQualifier(region_a);
+ if (regionAHasMergeQualifier
+ || !catalogJanitor.cleanMergeQualifier(region_b)) {
+ LOG.info("Skip merging regions " + region_a.getRegionNameAsString()
+ + ", " + region_b.getRegionNameAsString() + ", because region "
+ + (regionAHasMergeQualifier ? region_a.getEncodedName() : region_b
+ .getEncodedName()) + " has merge qualifier");
+ return;
+ }
+
+ RegionStates regionStates = masterServices.getAssignmentManager()
+ .getRegionStates();
+ ServerName region_a_location = regionStates.getRegionServerOfRegion(region_a);
+ ServerName region_b_location = regionStates.getRegionServerOfRegion(region_b);
+ if (region_a_location == null || region_b_location == null) {
+ LOG.info("Skip merging regions " + region_a.getRegionNameAsString()
+ + ", " + region_b.getRegionNameAsString() + ", because region "
+ + (region_a_location == null ? region_a.getEncodedName() : region_b
+ .getEncodedName()) + " is not online now");
+ return;
+ }
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
+ boolean onSameRS = region_a_location.equals(region_b_location);
+
+ // Make sure the regions are on the same regionserver before sending the
+ // merge regions request to the regionserver
+ if (!onSameRS) {
+ // Move region_b to region_a's location; switch region_a and region_b if
+ // region_a's load is lower than region_b's, so we always move the less
+ // loaded region
+ RegionLoad loadOfRegionA = masterServices.getServerManager()
+ .getLoad(region_a_location).getRegionsLoad()
+ .get(region_a.getRegionName());
+ RegionLoad loadOfRegionB = masterServices.getServerManager()
+ .getLoad(region_b_location).getRegionsLoad()
+ .get(region_b.getRegionName());
+ if (loadOfRegionA != null && loadOfRegionB != null
+ && loadOfRegionA.getRequestsCount() < loadOfRegionB
+ .getRequestsCount()) {
+ // switch region_a and region_b
+ HRegionInfo tmpRegion = this.region_a;
+ this.region_a = this.region_b;
+ this.region_b = tmpRegion;
+ ServerName tmpLocation = region_a_location;
+ region_a_location = region_b_location;
+ region_b_location = tmpLocation;
+ }
+
+ RegionPlan regionPlan = new RegionPlan(region_b, region_b_location,
+ region_a_location);
+ masterServices.getAssignmentManager().balance(regionPlan);
+ while (!masterServices.isStopped()) {
+ try {
+ Thread.sleep(20);
+ region_b_location = masterServices.getAssignmentManager()
+ .getRegionStates().getRegionServerOfRegion(region_b);
+ onSameRS = region_a_location.equals(region_b_location);
+ if (onSameRS || !regionStates.isRegionInTransition(region_b)) {
+ // Regions are on the same RS, or region_b is not in
+ // RegionInTransition any more
+ break;
+ }
+ if ((EnvironmentEdgeManager.currentTimeMillis() - startTime) > timeout) break;
+ } catch (InterruptedException e) {
+ InterruptedIOException iioe = new InterruptedIOException();
+ iioe.initCause(e);
+ throw iioe;
+ }
+ }
+ }
+
+ if (onSameRS) {
+ try{
+ masterServices.getServerManager().sendRegionsMerge(region_a_location,
+ region_a, region_b, forcible);
+ LOG.info("Successfully send MERGE REGIONS RPC to server "
+ + region_a_location.toString() + " for region "
+ + region_a.getRegionNameAsString() + ","
+ + region_b.getRegionNameAsString() + ", focible=" + forcible);
+ } catch (IOException ie) {
+ LOG.info("Failed send MERGE REGIONS RPC to server "
+ + region_a_location.toString() + " for region "
+ + region_a.getRegionNameAsString() + ","
+ + region_b.getRegionNameAsString() + ", focible=" + forcible + ", "
+ + ie.getMessage());
+ }
+ } else {
+ LOG.info("Cancel merging regions " + region_a.getRegionNameAsString()
+ + ", " + region_b.getRegionNameAsString()
+ + ", because can't move them together after "
+ + (EnvironmentEdgeManager.currentTimeMillis() - startTime) + "ms");
+ }
+ }
+
+}
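
On the master, the handler would be queued on the executor service keyed by its C_M_MERGE_REGION event type; a sketch of that dispatch (the exact call site in HMaster is not shown in this patch):

    // Hypothetical dispatch, mirroring how other EventHandlers are queued.
    masterServices.getExecutorService().submit(
        new DispatchMergingRegionHandler(masterServices, catalogJanitor,
            regionA, regionB, false));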
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MergedRegionHandler.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MergedRegionHandler.java (revision 0)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MergedRegionHandler.java (revision 0)
@@ -0,0 +1,117 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.master.handler;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.executor.EventHandler;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.master.AssignmentManager;
+import org.apache.hadoop.hbase.zookeeper.ZKAssign;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
+
+/**
+ * Handles the MERGE regions event on the master: the master receives the
+ * merge report from the regionserver, then offlines the merging regions and
+ * onlines the merged region. Here region_a sorts before region_b.
+ */
+@InterfaceAudience.Private
+public class MergedRegionHandler extends EventHandler implements
+ TotesHRegionInfo {
+ private static final Log LOG = LogFactory.getLog(MergedRegionHandler.class);
+ private final AssignmentManager assignmentManager;
+ private final HRegionInfo merged;
+ private final HRegionInfo region_a;
+ private final HRegionInfo region_b;
+ private final ServerName sn;
+
+ public MergedRegionHandler(Server server,
+ AssignmentManager assignmentManager, ServerName sn,
+ final List<HRegionInfo> mergeRegions) {
+ super(server, EventType.RS_ZK_REGION_MERGE);
+ assert mergeRegions.size() == 3;
+ this.assignmentManager = assignmentManager;
+ this.merged = mergeRegions.get(0);
+ this.region_a = mergeRegions.get(1);
+ this.region_b = mergeRegions.get(2);
+ this.sn = sn;
+ }
+
+ @Override
+ public HRegionInfo getHRegionInfo() {
+ return this.merged;
+ }
+
+ @Override
+ public String toString() {
+ String name = "UnknownServerName";
+ if (server != null && server.getServerName() != null) {
+ name = server.getServerName().toString();
+ }
+ String mergedRegion = "UnknownRegion";
+ if (merged != null) {
+ mergedRegion = merged.getRegionNameAsString();
+ }
+ return getClass().getSimpleName() + "-" + name + "-" + getSeqid() + "-"
+ + mergedRegion;
+ }
+
+ @Override
+ public void process() {
+ String encodedRegionName = this.merged.getEncodedName();
+ LOG.debug("Handling MERGE event for " + encodedRegionName
+ + "; deleting node");
+
+ this.assignmentManager.handleRegionsMergeReport(this.sn, this.merged,
+ this.region_a, this.region_b);
+ // Remove region from ZK
+ try {
+ boolean successful = false;
+ while (!successful) {
+ // It's possible that the RS tickles in between the reading of the
+ // znode and the deleting, so it's safe to retry.
+ successful = ZKAssign.deleteNode(this.server.getZooKeeper(),
+ encodedRegionName, EventType.RS_ZK_REGION_MERGE);
+ }
+ } catch (KeeperException e) {
+ if (e instanceof NoNodeException) {
+ String znodePath = ZKUtil.joinZNode(
+ this.server.getZooKeeper().splitLogZNode, encodedRegionName);
+ LOG.debug("The znode " + znodePath
+ + " does not exist. May be deleted already.");
+ } else {
+ server.abort("Error deleting MERGE node in ZK for transition ZK node ("
+ + merged.getEncodedName() + ")", e);
+ }
+ }
+ LOG.info("Handled MERGE event; merged="
+ + this.merged.getRegionNameAsString() + " region_a="
+ + this.region_a.getRegionNameAsString() + "region_b="
+ + this.region_b.getRegionNameAsString());
+ }
+}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (revision 1460277)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (working copy)
@@ -1172,6 +1172,23 @@
}
/**
+ * Create a table.
+ * @param tableName name of the table to create
+ * @param family column family to add
+ * @param splitRows split keys to pre-split the table with
+ * @return An HTable instance for the created table.
+ * @throws IOException
+ */
+ public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows)
+ throws IOException {
+ HTableDescriptor desc = new HTableDescriptor(tableName);
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ desc.addFamily(hcd);
+ getHBaseAdmin().createTable(desc, splitRows);
+ return new HTable(getConfiguration(), tableName);
+ }
+
+ /**
* Drop an existing table
* @param tableName existing table
*/
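
The new createTable overload gives tests a pre-split table in one call, e.g. (sketch):

    // Two split points yield three regions.
    byte[][] splits = new byte[][] { Bytes.toBytes("b"), Bytes.toBytes("c") };
    HTable table = TEST_UTIL.createTable(Bytes.toBytes("t"),
        Bytes.toBytes("fam"), splits);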
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java (revision 0)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java (revision 0)
@@ -0,0 +1,822 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.ListIterator;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.RegionTransition;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.regionserver.SplitTransaction.LoggingProgressable;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.zookeeper.ZKAssign;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NodeExistsException;
+
+/**
+ * Executes region merge as a "transaction". It is similar to
+ * SplitTransaction. Call {@link #prepare(RegionServerServices)} to setup the
+ * transaction, {@link #execute(Server, RegionServerServices)} to run the
+ * transaction and {@link #rollback(Server, RegionServerServices)} to cleanup if
+ * execute fails.
+ *
+ * <p>Here is an example of how you would use this class:
+ *
+ * <pre>
+ * RegionMergeTransaction mt = new RegionMergeTransaction(region_a, region_b, false);
+ * if (!mt.prepare(services)) return;
+ * try {
+ *   mt.execute(server, services);
+ * } catch (IOException ioe) {
+ *   try {
+ *     mt.rollback(server, services);
+ *     return;
+ *   } catch (RuntimeException e) {
+ *     myAbortable.abort("Failed merge, abort");
+ *   }
+ * }
+ * </pre>
+ *
+ * <p>This class is not thread safe. Callers must ensure the merge is run by
+ * one thread only.
+ */
+@InterfaceAudience.Private
+public class RegionMergeTransaction {
+ private static final Log LOG = LogFactory.getLog(RegionMergeTransaction.class);
+ private static final String MERGEDIR = ".merges";
+
+ // Merged region info
+ private HRegionInfo mergedRegionInfo;
+ // region_a sorts before region_b
+ private final HRegion region_a;
+ private final HRegion region_b;
+ // merges dir is under region_a
+ private final Path mergesdir;
+ private int znodeVersion = -1;
+ // We only merge adjacent regions if forcible is false
+ private final boolean forcible;
+
+ /**
+ * Types to add to the transaction journal. Each enum value is a step in the
+ * merge transaction. Used to figure out how far we need to roll back.
+ */
+ enum JournalEntry {
+ /**
+ * Set region as in transition, set it into MERGING state.
+ */
+ SET_MERGING_IN_ZK,
+ /**
+ * We created the temporary merge data directory.
+ */
+ CREATED_MERGE_DIR,
+ /**
+ * Closed the merging region A.
+ */
+ CLOSED_REGION_A,
+ /**
+ * The merging region A has been taken out of the server's online regions list.
+ */
+ OFFLINED_REGION_A,
+ /**
+ * Closed the merging region B.
+ */
+ CLOSED_REGION_B,
+ /**
+ * The merging region B has been taken out of the server's online regions list.
+ */
+ OFFLINED_REGION_B,
+ /**
+ * Started in on creation of the merged region.
+ */
+ STARTED_MERGED_REGION_CREATION,
+ /**
+ * Point of no return. If we got here, then transaction is not recoverable
+ * other than by crashing out the regionserver.
+ */
+ PONR
+ }
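+
+ // Note: rollback() below replays these journal entries in reverse
+ // order, undoing each completed step; once PONR has been journaled,
+ // rollback is no longer possible and the caller must abort the
+ // regionserver.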
+
+ /*
+ * Journal of how far the merge transaction has progressed.
+ */
+ private final List<JournalEntry> journal = new ArrayList<JournalEntry>();
+
+ private static IOException closedByOtherException = new IOException(
+ "Failed to close region: already closed by another thread");
+
+ /**
+ * Constructor
+ * @param a region a to merge
+ * @param b region b to merge
+ * @param forcible if false, we will only merge adjacent regions
+ */
+ public RegionMergeTransaction(final HRegion a, final HRegion b,
+ final boolean forcible) {
+ if (a.getRegionInfo().compareTo(b.getRegionInfo()) <= 0) {
+ this.region_a = a;
+ this.region_b = b;
+ } else {
+ this.region_a = b;
+ this.region_b = a;
+ }
+ this.forcible = forcible;
+ this.mergesdir = getMergeDir(this.region_a);
+ }
+
+ /**
+ * Does checks on merge inputs.
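+ * <p>For example (illustrative): two regions with key ranges [a,b) and
+ * [c,d) are not adjacent, so prepare() returns false for them unless this
+ * transaction was created with forcible=true.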
+ * @param services Services of the regionserver; used to check the merge
+ * qualifier in META
+ * @return <code>true</code> if the regions are mergeable, else
+ * <code>false</code> if they are not (e.g. one of them is already closed).
+ */
+ public boolean prepare(final RegionServerServices services) {
+ if (!region_a.getTableDesc().getNameAsString()
+ .equals(region_b.getTableDesc().getNameAsString())) {
+ LOG.info("Can't merge regions " + region_a + "," + region_b
+ + " because they do not belong to the same table");
+ return false;
+ }
+ if (region_a.getRegionInfo().equals(region_b.getRegionInfo())) {
+ LOG.info("Can't merge the same region " + region_a);
+ return false;
+ }
+ if (!forcible && !HRegionInfo.areAdjacent(region_a.getRegionInfo(),
+ region_b.getRegionInfo())) {
+ String msg = "Skip merging " + this.region_a.getRegionNameAsString()
+ + " and " + this.region_b.getRegionNameAsString()
+ + ", because they are not adjacent.";
+ LOG.info(msg);
+ return false;
+ }
+ if (!this.region_a.isMergeable() || !this.region_b.isMergeable()) {
+ return false;
+ }
+ try {
+ boolean regionAHasMergeQualifier = hasMergeQualifierInMeta(services,
+ region_a.getRegionName());
+ if (regionAHasMergeQualifier ||
+ hasMergeQualifierInMeta(services, region_b.getRegionName())) {
+ LOG.debug("Region " + (regionAHasMergeQualifier ? region_a.getRegionNameAsString()
+ : region_b.getRegionNameAsString())
+ + " is not mergeable because it has merge qualifier in META");
+ return false;
+ }
+ } catch (IOException e) {
+ LOG.warn("Failed judging whether merge transaction is available for "
+ + region_a.getRegionNameAsString() + " and "
+ + region_b.getRegionNameAsString(), e);
+ return false;
+ }
+
+ // NOTE: Historically we had to make sure neither merging region had a
+ // parent region left in .META.; otherwise, fixing up daughters on master
+ // restart could bring a merged-away daughter region back online.
+ // Since HBASE-7721, daughters no longer need fixing up, so there is
+ // nothing to do here.
+
+ this.mergedRegionInfo = getMergedRegionInfo(region_a.getRegionInfo(),
+ region_b.getRegionInfo());
+ return true;
+ }
+
+ /**
+ * Run the transaction.
+ * @param server Hosting server instance. Can be null when testing (won't
+ * try to update zk if the server is null)
+ * @param services Used to online/offline regions.
+ * @return merged region
+ * @throws IOException If thrown, transaction failed. Call
+ * {@link #rollback(Server, RegionServerServices)}
+ * @see #rollback(Server, RegionServerServices)
+ */
+ public HRegion execute(final Server server,
+ final RegionServerServices services) throws IOException {
+ HRegion mergedRegion = createMergedRegion(server, services);
+ openMergedRegion(server, services, mergedRegion);
+ transitionZKNode(server, services);
+ return mergedRegion;
+ }
+
+ /**
+ * Prepare the merged region and region files.
+ * @param server Hosting server instance. Can be null when testing (won't
+ * try to update zk if the server is null)
+ * @param services Used to online/offline regions.
+ * @return merged region
+ * @throws IOException If thrown, transaction failed. Call
+ * {@link #rollback(Server, RegionServerServices)}
+ */
+ HRegion createMergedRegion(final Server server,
+ final RegionServerServices services) throws IOException {
+ LOG.info("Starting merge of " + region_a + " and "
+ + region_b.getRegionNameAsString() + ", forcible=" + forcible);
+ if ((server != null && server.isStopped())
+ || (services != null && services.isStopping())) {
+ throw new IOException("Server is stopped or stopping");
+ }
+
+ // If true, no cluster to write meta edits to or to update znodes in.
+ boolean testing = server == null
+ || server.getConfiguration().getBoolean("hbase.testing.nocluster", false);
+
+ // Set ephemeral MERGING znode up in zk. Mocked servers sometimes don't
+ // have zookeeper so don't do zk stuff if server or zookeeper is null
+ if (server != null && server.getZooKeeper() != null) {
+ try {
+ createNodeMerging(server.getZooKeeper(), this.mergedRegionInfo,
+ server.getServerName());
+ } catch (KeeperException e) {
+ throw new IOException("Failed creating MERGING znode on "
+ + this.mergedRegionInfo.getRegionNameAsString(), e);
+ }
+ }
+ this.journal.add(JournalEntry.SET_MERGING_IN_ZK);
+ if (server != null && server.getZooKeeper() != null) {
+ try {
+ // Transition node from MERGING to MERGING after creating the merge
+ // node. Master will get the callback for node change only if the
+ // transition is successful.
+ // Note that if the transition fails then the rollback will delete the
+ // created znode as the journal entry SET_MERGING_IN_ZK is added.
+ this.znodeVersion = transitionNodeMerging(server.getZooKeeper(),
+ this.mergedRegionInfo, server.getServerName(), -1);
+ } catch (KeeperException e) {
+ throw new IOException("Failed setting MERGING znode on "
+ + this.mergedRegionInfo.getRegionNameAsString(), e);
+ }
+ }
+
+ createMergeDir(this.region_a.getFilesystem(), this.mergesdir);
+ this.journal.add(JournalEntry.CREATED_MERGE_DIR);
+
+ List<StoreFile> hstoreFilesOfRegionA = closeAndOfflineRegion(
+ services, this.region_a, true, testing);
+ List<StoreFile> hstoreFilesOfRegionB = closeAndOfflineRegion(
+ services, this.region_b, false, testing);
+
+ assert hstoreFilesOfRegionA != null && hstoreFilesOfRegionB != null;
+
+ // mergeStoreFiles creates merged region dirs under the region_a merges
+ // dir. There is nothing to roll back here on failure -- cleanup of
+ // CREATED_MERGE_DIR will clean this up.
+ mergeStoreFiles(hstoreFilesOfRegionA, hstoreFilesOfRegionB);
+
+ // Log to the journal that we are creating the merged region. We could
+ // fail halfway through, leaving stuff in the fs that needs cleanup -- a
+ // storefile or two. That's why we add the entry to the journal BEFORE
+ // rather than AFTER the change.
+ this.journal.add(JournalEntry.STARTED_MERGED_REGION_CREATION);
+ HRegion mergedRegion = createMergedRegionFromMerges(this.region_a,
+ this.region_b, this.mergedRegionInfo);
+
+ // This is the point of no return. Similar with SplitTransaction.
+ // IF we reach the PONR then subsequent failures need to crash out this
+ // regionserver
+ this.journal.add(JournalEntry.PONR);
+
+ // Add the merged region and delete region_a and region_b as one atomic
+ // update to META. See HBASE-7721. In case of failure, this update is
+ // used to determine whether the region was actually merged: if it was,
+ // the master will roll forward; if not, the master will roll back.
+ if (!testing) {
+ MetaEditor.mergeRegions(server.getCatalogTracker(),
+ mergedRegion.getRegionInfo(), region_a.getRegionInfo(),
+ region_b.getRegionInfo(), server.getServerName());
+ }
+ return mergedRegion;
+ }
+
+ /**
+ * Create a merged region from the merges directory under region a. Kept
+ * in a separate method so it can be mocked for tests.
+ * @param a merging region A
+ * @param b merging region B
+ * @param mergedRegion hri of the merged region
+ * @return merged HRegion.
+ * @throws IOException
+ */
+ HRegion createMergedRegionFromMerges(final HRegion a, final HRegion b,
+ final HRegionInfo mergedRegion) throws IOException {
+ return a.createMergedRegionFromMerges(mergedRegion, b, new Path(
+ this.mergesdir, mergedRegion.getEncodedName()));
+ }
+
+ /**
+ * Close the merging region and offline it in the regionserver
+ * @param services Services of the regionserver
+ * @param region the merging region to close and offline
+ * @param isRegionA true if it is merging region a, false if it is region b
+ * @param testing true if it is testing
+ * @return a list of store files
+ * @throws IOException
+ */
+ private List<StoreFile> closeAndOfflineRegion(
+ final RegionServerServices services, final HRegion region,
+ final boolean isRegionA, final boolean testing) throws IOException {
+ List<StoreFile> hstoreFilesToMerge = null;
+ Exception exceptionToThrow = null;
+ try {
+ hstoreFilesToMerge = region.close(false);
+ } catch (Exception e) {
+ exceptionToThrow = e;
+ }
+ if (exceptionToThrow == null && hstoreFilesToMerge == null) {
+ // The region was closed by a concurrent thread. We can't continue
+ // with the merge, instead we must just abandon the merge. If we
+ // reopen or merge this could cause problems because the region has
+ // probably already been moved to a different server, or is in the
+ // process of moving to a different server.
+ exceptionToThrow = closedByOtherException;
+ }
+ if (exceptionToThrow != closedByOtherException) {
+ this.journal.add(isRegionA ? JournalEntry.CLOSED_REGION_A
+ : JournalEntry.CLOSED_REGION_B);
+ }
+ if (exceptionToThrow != null) {
+ if (exceptionToThrow instanceof IOException)
+ throw (IOException) exceptionToThrow;
+ throw new IOException(exceptionToThrow);
+ }
+
+ if (!testing) {
+ services.removeFromOnlineRegions(region, null);
+ }
+ this.journal.add(isRegionA ? JournalEntry.OFFLINED_REGION_A
+ : JournalEntry.OFFLINED_REGION_B);
+ return hstoreFilesToMerge;
+ }
+
+ /**
+ * Get the merged region info for the two specified merging regions
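+ * <p>For example (illustrative): merging adjacent regions with key ranges
+ * [b,c) and [c,d) yields a merged region covering [b,d):
+ * <pre>
+ * HRegionInfo merged = RegionMergeTransaction.getMergedRegionInfo(a, b);
+ * // merged spans from a's start key to b's end key, a sorting first
+ * </pre>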
+ * @param a merging region A
+ * @param b merging region B
+ * @return the merged region info
+ */
+ public static HRegionInfo getMergedRegionInfo(final HRegionInfo a,
+ final HRegionInfo b) {
+ long rid = EnvironmentEdgeManager.currentTimeMillis();
+ // Region id is a timestamp. The merged region's id can't be less than
+ // those of the merging regions, else it would sort into the wrong
+ // location in .META.
+ if (rid < a.getRegionId() || rid < b.getRegionId()) {
+ LOG.warn("Clock skew; merging regions' ids are " + a.getRegionId()
+ + " and " + b.getRegionId() + ", but current time here is " + rid);
+ rid = Math.max(a.getRegionId(), b.getRegionId()) + 1;
+ }
+
+ byte[] startKey = null;
+ byte[] endKey = null;
+ if (a.compareTo(b) <= 0) {
+ startKey = a.getStartKey();
+ endKey = b.getEndKey();
+ } else {
+ startKey = b.getStartKey();
+ endKey = a.getEndKey();
+ }
+
+ // Merged region is sorted between two merging regions in META
+ HRegionInfo mergedRegionInfo = new HRegionInfo(a.getTableName(), startKey,
+ endKey, false, rid);
+ return mergedRegionInfo;
+ }
+
+ /**
+ * Perform the time-consuming opening of the merged region.
+ * @param server Hosting server instance. Can be null when testing (won't
+ * try to update zk if the server is null)
+ * @param services Used to online/offline regions.
+ * @param merged the merged region
+ * @throws IOException If thrown, transaction failed. Call
+ * {@link #rollback(Server, RegionServerServices)}
+ */
+ void openMergedRegion(final Server server,
+ final RegionServerServices services, HRegion merged) throws IOException {
+ boolean stopped = server != null && server.isStopped();
+ boolean stopping = services != null && services.isStopping();
+ if (stopped || stopping) {
+ LOG.info("Not opening merged region " + merged.getRegionNameAsString()
+ + " because stopping=" + stopping + ", stopped=" + stopped);
+ return;
+ }
+ HRegionInfo hri = merged.getRegionInfo();
+ LoggingProgressable reporter = server == null ? null
+ : new LoggingProgressable(hri, server.getConfiguration().getLong(
+ "hbase.regionserver.regionmerge.open.log.interval", 10000));
+ merged.openHRegion(reporter);
+
+ if (services != null) {
+ try {
+ services.postOpenDeployTasks(merged, server.getCatalogTracker());
+ services.addToOnlineRegions(merged);
+ } catch (KeeperException ke) {
+ throw new IOException(ke);
+ }
+ }
+ }
+
+ /**
+ * Finish off the merge transaction by transitioning the znode
+ * @param server Hosting server instance. Can be null when testing (won't
+ * try to update zk if the server is null)
+ * @param services Used to online/offline regions.
+ * @throws IOException If thrown, transaction failed. Call
+ * {@link #rollback(Server, RegionServerServices)}
+ */
+ void transitionZKNode(final Server server, final RegionServerServices services)
+ throws IOException {
+ if (server == null || server.getZooKeeper() == null) {
+ return;
+ }
+
+ // Tell master about merge by updating zk. If we fail, abort.
+ try {
+ this.znodeVersion = transitionNodeMerge(server.getZooKeeper(),
+ this.mergedRegionInfo, region_a.getRegionInfo(),
+ region_b.getRegionInfo(), server.getServerName(), this.znodeVersion);
+
+ long startTime = EnvironmentEdgeManager.currentTimeMillis();
+ int spins = 0;
+ // Now wait for the master to process the merge. We know it's done
+ // when the znode is deleted. The reason we keep tickling the znode is
+ // that it's possible for the master to miss an event.
+ do {
+ if (spins % 10 == 0) {
+ LOG.debug("Still waiting on the master to process the merge for "
+ + this.mergedRegionInfo.getEncodedName() + ", waited "
+ + (EnvironmentEdgeManager.currentTimeMillis() - startTime) + "ms");
+ }
+ Thread.sleep(100);
+ // When this returns -1 it means the znode doesn't exist
+ this.znodeVersion = tickleNodeMerge(server.getZooKeeper(),
+ this.mergedRegionInfo, region_a.getRegionInfo(),
+ region_b.getRegionInfo(), server.getServerName(), this.znodeVersion);
+ spins++;
+ } while (this.znodeVersion != -1 && !server.isStopped()
+ && !services.isStopping());
+ } catch (Exception e) {
+ if (e instanceof InterruptedException) {
+ Thread.currentThread().interrupt();
+ }
+ throw new IOException("Failed telling master about merge "
+ + mergedRegionInfo.getEncodedName(), e);
+ }
+
+ // On the way out, the mergedir and its dross are still in place, but
+ // since the merge was successful just leave them; they'll be cleaned up
+ // when region_a is cleaned up by the CatalogJanitor on the master.
+ }
+
+ /**
+ * Create reference file(s) of merging regions under the region_a merges dir
+ * @param hstoreFilesOfRegionA
+ * @param hstoreFilesOfRegionB
+ * @throws IOException
+ */
+ private void mergeStoreFiles(List<StoreFile> hstoreFilesOfRegionA,
+ List<StoreFile> hstoreFilesOfRegionB)
+ throws IOException {
+ // Create reference file(s) of region A in mergesdir
+ FileSystem fs = this.region_a.getFilesystem();
+ for (StoreFile storeFile : hstoreFilesOfRegionA) {
+ Path storedir = HStore.getStoreHomedir(this.mergesdir,
+ mergedRegionInfo.getEncodedName(), storeFile.getFamily());
+ StoreFile.split(fs, storedir, storeFile, this.region_a.getStartKey(),
+ true);
+ }
+
+ // Create reference file(s) of region B in mergedir
+ for (StoreFile storeFile : hstoreFilesOfRegionB) {
+ Path storedir = HStore.getStoreHomedir(this.mergesdir,
+ mergedRegionInfo.getEncodedName(), storeFile.getFamily());
+ StoreFile.split(fs, storedir, storeFile, this.region_b.getStartKey(),
+ true);
+ }
+ }
+
+ /**
+ * @param server Hosting server instance (May be null when testing).
+ * @param services Services of regionserver, used to online regions.
+ * @throws IOException If thrown, rollback failed. Take drastic action.
+ * @return True if we successfully rolled back, false if we got to the point
+ * of no return and so now need to abort the server to minimize
+ * damage.
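+ * <p>A sketch of how a caller might handle the return value (myAbortable
+ * is the hypothetical Abortable from the class-level example):
+ * <pre>
+ * if (!mt.rollback(server, services)) {
+ *   myAbortable.abort("Merge passed the PONR; cannot roll back");
+ * }
+ * </pre>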
+ */
+ public boolean rollback(final Server server,
+ final RegionServerServices services) throws IOException {
+ assert this.mergedRegionInfo != null;
+ boolean result = true;
+ FileSystem fs = this.region_a.getFilesystem();
+ ListIterator<JournalEntry> iterator = this.journal
+ .listIterator(this.journal.size());
+ // Iterate in reverse.
+ while (iterator.hasPrevious()) {
+ JournalEntry je = iterator.previous();
+ switch (je) {
+
+ case SET_MERGING_IN_ZK:
+ if (server != null && server.getZooKeeper() != null) {
+ cleanZK(server, this.mergedRegionInfo);
+ }
+ break;
+
+ case CREATED_MERGE_DIR:
+ this.region_a.writestate.writesEnabled = true;
+ this.region_b.writestate.writesEnabled = true;
+ cleanupMergeDir(fs, this.mergesdir);
+ break;
+
+ case CLOSED_REGION_A:
+ try {
+ // So, this returns a seqid but if we just closed and then reopened,
+ // we should be ok. On close, we flushed using sequenceid obtained
+ // from hosting regionserver so no need to propagate the sequenceid
+ // returned out of initialize below up into regionserver as we
+ // normally do.
+ this.region_a.initialize();
+ } catch (IOException e) {
+ LOG.error("Failed rollbacking CLOSED_REGION_A of region "
+ + this.region_a.getRegionNameAsString(), e);
+ throw new RuntimeException(e);
+ }
+ break;
+
+ case OFFLINED_REGION_A:
+ if (services != null)
+ services.addToOnlineRegions(this.region_a);
+ break;
+
+ case CLOSED_REGION_B:
+ try {
+ this.region_b.initialize();
+ } catch (IOException e) {
+ LOG.error("Failed rollbacking CLOSED_REGION_A of region "
+ + this.region_b.getRegionNameAsString(), e);
+ throw new RuntimeException(e);
+ }
+ break;
+
+ case OFFLINED_REGION_B:
+ if (services != null)
+ services.addToOnlineRegions(this.region_b);
+ break;
+
+ case STARTED_MERGED_REGION_CREATION:
+ cleanupMergedRegion(fs, region_a.getTableDir(),
+ this.mergedRegionInfo.getEncodedName());
+ break;
+
+ case PONR:
+ // We got to the point-of-no-return so we need to just abort. Return
+ // immediately. Do not clean up created merged regions.
+ return false;
+
+ default:
+ throw new RuntimeException("Unhandled journal entry: " + je);
+ }
+ }
+ return result;
+ }
+
+ HRegionInfo getMergedRegionInfo() {
+ return this.mergedRegionInfo;
+ }
+
+ // For unit testing.
+ Path getMergesDir() {
+ return this.mergesdir;
+ }
+
+ private static void cleanZK(final Server server, final HRegionInfo hri) {
+ try {
+ // Only delete if it's in the expected state; it could have been hijacked.
+ ZKAssign.deleteNode(server.getZooKeeper(), hri.getEncodedName(),
+ EventType.RS_ZK_REGION_MERGING);
+ } catch (KeeperException.NoNodeException e) {
+ LOG.warn("Failed cleanup zk node of " + hri.getRegionNameAsString(), e);
+ } catch (KeeperException e) {
+ server.abort("Failed cleanup zk node of " + hri.getRegionNameAsString(),e);
+ }
+ }
+
+ /**
+ * Creates a new ephemeral node in the MERGING state for the merged region.
+ * It is created ephemeral so that it goes away if the regionserver dies
+ * mid-merge.
+ *
+ * <p>Does not transition nodes from other states. If a node already
+ * exists for this region, a {@link NodeExistsException} will be thrown.
+ *
+ * @param zkw zk reference
+ * @param region region to be created as offline
+ * @param serverName server event originates from
+ * @return Version of znode created.
+ * @throws KeeperException
+ * @throws IOException
+ */
+ int createNodeMerging(final ZooKeeperWatcher zkw, final HRegionInfo region,
+ final ServerName serverName) throws KeeperException, IOException {
+ LOG.debug(zkw.prefix("Creating ephemeral node for "
+ + region.getEncodedName() + " in MERGING state"));
+ RegionTransition rt = RegionTransition.createRegionTransition(
+ EventType.RS_ZK_REGION_MERGING, region.getRegionName(), serverName);
+ String node = ZKAssign.getNodeName(zkw, region.getEncodedName());
+ if (!ZKUtil.createEphemeralNodeAndWatch(zkw, node, rt.toByteArray())) {
+ throw new IOException("Failed create of ephemeral " + node);
+ }
+ // Transition node from MERGING to MERGING and pick up the version so we
+ // can be sure this znode is ours; the version is needed when deleting.
+ return transitionNodeMerging(zkw, region, serverName, -1);
+ }
+
+ /**
+ * Transitions an existing node for the specified region which is currently in
+ * the MERGING state to be in the MERGE state. Converts the ephemeral MERGING
+ * znode to an ephemeral MERGE node. Master cleans up MERGE znode when it
+ * reads it (or if we crash, zk will clean it up).
+ *
+ * <p>Does not transition nodes from other states. If for some reason the
+ * node could not be transitioned, the method returns -1. If the
+ * transition is successful, the version of the node after transition is
+ * returned.
+ *
+ * <p>This method can fail and return -1 for three different reasons:
+ * <ul>
+ * <li>Node for this region does not exist</li>
+ * <li>Node for this region is not in MERGING state</li>
+ * <li>After verifying MERGING state, the update fails because of the
+ * wrong version (this should never actually happen since an RS only does
+ * this transition following a transition to MERGING; if two RS are
+ * conflicting, one would fail the original transition to MERGING and not
+ * this transition)</li>
+ * </ul>
+ *
+ * <p>Does not set any watches.
+ *
+ * <p>This method should only be used by a RegionServer when completing
+ * the open of the merged region.
+ *
+ * @param zkw zk reference
+ * @param merged region to be transitioned to opened
+ * @param a merging region A
+ * @param b merging region B
+ * @param serverName server event originates from
+ * @param znodeVersion expected version of data before modification
+ * @return version of node after transition, -1 if unsuccessful transition
+ * @throws KeeperException if unexpected zookeeper exception
+ * @throws IOException
+ */
+ private static int transitionNodeMerge(ZooKeeperWatcher zkw,
+ HRegionInfo merged, HRegionInfo a, HRegionInfo b, ServerName serverName,
+ final int znodeVersion) throws KeeperException, IOException {
+ byte[] payload = HRegionInfo.toDelimitedByteArray(merged, a, b);
+ return ZKAssign.transitionNode(zkw, merged, serverName,
+ EventType.RS_ZK_REGION_MERGING, EventType.RS_ZK_REGION_MERGE,
+ znodeVersion, payload);
+ }
+
+ /**
+ * Transitions the MERGING znode to itself (MERGING to MERGING) so that
+ * the master gets a node-change callback and we pick up the znode
+ * version.
+ *
+ * @param zkw zk reference
+ * @param parent region to be transitioned to merging
+ * @param serverName server event originates from
+ * @param version znode version
+ * @return version of node after transition, -1 if unsuccessful transition
+ * @throws KeeperException
+ * @throws IOException
+ */
+ int transitionNodeMerging(final ZooKeeperWatcher zkw,
+ final HRegionInfo parent, final ServerName serverName, final int version)
+ throws KeeperException, IOException {
+ return ZKAssign.transitionNode(zkw, parent, serverName,
+ EventType.RS_ZK_REGION_MERGING, EventType.RS_ZK_REGION_MERGING,
+ version);
+ }
+
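+ /**
+ * Checks that the merged region's MERGE znode is still in place by
+ * transitioning it to itself (MERGE to MERGE). When this returns -1 the
+ * znode no longer exists, i.e. the master has processed the merge.
+ * @param zkw zk reference
+ * @param merged the merged region
+ * @param a merging region A
+ * @param b merging region B
+ * @param serverName server event originates from
+ * @param znodeVersion expected version of data before modification
+ * @return version of node after transition, -1 if the znode is gone
+ * @throws KeeperException if unexpected zookeeper exception
+ * @throws IOException
+ */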
+ private static int tickleNodeMerge(ZooKeeperWatcher zkw, HRegionInfo merged,
+ HRegionInfo a, HRegionInfo b, ServerName serverName,
+ final int znodeVersion) throws KeeperException, IOException {
+ byte[] payload = HRegionInfo.toDelimitedByteArray(a, b);
+ return ZKAssign.transitionNode(zkw, merged, serverName,
+ EventType.RS_ZK_REGION_MERGE, EventType.RS_ZK_REGION_MERGE,
+ znodeVersion, payload);
+ }
+
+ /**
+ * Checks if the given region has a merge qualifier in .META.
+ * @param services Services of the regionserver
+ * @param regionName name of the specified region
+ * @return true if the given region has a merge qualifier in .META. (it
+ * will be cleaned up by the CatalogJanitor)
+ * @throws IOException
+ */
+ boolean hasMergeQualifierInMeta(final RegionServerServices services,
+ final byte[] regionName) throws IOException {
+ // Get the merge regions if the given region is a merged region that
+ // still carries its merge qualifier
+ Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaReader
+ .getRegionsFromMergeQualifier(services.getCatalogTracker(), regionName);
+ if (mergeRegions != null &&
+ (mergeRegions.getFirst() != null || mergeRegions.getSecond() != null)) {
+ // It has merge qualifier
+ return true;
+ }
+ return false;
+ }
+
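+ /**
+ * @param r region whose merges dir we want (region_a of the merge)
+ * @return Path to the temporary merges directory, i.e.
+ * &lt;region dir&gt;/.merges
+ */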
+ static Path getMergeDir(final HRegion r) {
+ return new Path(r.getRegionDir(), MERGEDIR);
+ }
+
+ /**
+ * @param fs Filesystem to use
+ * @param mergedir Directory to store temporary merge data in
+ * @throws IOException If we fail to delete a pre-existing
+ * mergedir or fail to create it.
+ * @see #cleanupMergeDir(FileSystem, Path)
+ */
+ private static void createMergeDir(final FileSystem fs, final Path mergedir)
+ throws IOException {
+ if (fs.exists(mergedir)) {
+ LOG.info("The " + mergedir
+ + " directory exists. Hence deleting it to recreate it");
+ if (!fs.delete(mergedir, true)) {
+ throw new IOException("Failed deletion of " + mergedir
+ + " before creating them again.");
+ }
+ }
+ if (!fs.mkdirs(mergedir))
+ throw new IOException("Failed create of " + mergedir);
+ }
+
+ static void cleanupMergeDir(final FileSystem fs, final Path mergedir)
+ throws IOException {
+ // Mergedir may have been cleaned up by reopen of the parent dir.
+ deleteDir(fs, mergedir, false);
+ }
+
+ /**
+ * @param fs Filesystem to use
+ * @param dir Directory to delete
+ * @param mustPreExist If true, we'll throw an exception if dir
+ * does not preexist, else we'll just pass.
+ * @throws IOException Thrown if we fail to delete passed dir
+ */
+ private static void deleteDir(final FileSystem fs, final Path dir,
+ final boolean mustPreExist) throws IOException {
+ if (!fs.exists(dir)) {
+ if (mustPreExist)
+ throw new IOException(dir.toString() + " does not exist!");
+ } else if (!fs.delete(dir, true)) {
+ throw new IOException("Failed delete of " + dir);
+ }
+ }
+
+ private static void cleanupMergedRegion(final FileSystem fs,
+ final Path tabledir, final String encodedName) throws IOException {
+ Path regiondir = HRegion.getRegionDir(tabledir, encodedName);
+ // Dir may not preexist.
+ deleteDir(fs, regiondir, false);
+ }
+}